author     Ryan Dahl <ry@tinyclouds.org>  2011-07-08 16:40:11 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2011-07-08 16:40:11 -0700
commit     e5564a3f29e0a818832a97c7c3b28d7c8b3b0460 (patch)
tree       4b48a6577080d5e44da4d2cbebb7fe7951660de8 /deps/v8/src/mips
parent     0df2f74d364826053641395b01c2fcb1345057a9 (diff)
download   node-e5564a3f29e0a818832a97c7c3b28d7c8b3b0460.tar.gz
Upgrade V8 to 3.4.10
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h  183
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc  1416
-rw-r--r--  deps/v8/src/mips/assembler-mips.h  855
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc  1543
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc  6889
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h  660
-rw-r--r--  deps/v8/src/mips/codegen-mips-inl.h  70
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc  1401
-rw-r--r--  deps/v8/src/mips/codegen-mips.h  383
-rw-r--r--  deps/v8/src/mips/constants-mips.cc  76
-rw-r--r--  deps/v8/src/mips/constants-mips.h  360
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc  33
-rw-r--r--  deps/v8/src/mips/debug-mips.cc  207
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc (renamed from deps/v8/src/mips/fast-codegen-mips.cc)  67
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc  609
-rw-r--r--  deps/v8/src/mips/frames-mips.cc  53
-rw-r--r--  deps/v8/src/mips/frames-mips.h  83
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc  4177
-rw-r--r--  deps/v8/src/mips/ic-mips.cc  1654
-rw-r--r--  deps/v8/src/mips/jump-target-mips.cc  175
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h (renamed from deps/v8/src/mips/register-allocator-mips.h)  39
-rw-r--r--  deps/v8/src/mips/lithium-mips.h  307
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  3805
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h  1112
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc  1251
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.h  252
-rw-r--r--  deps/v8/src/mips/register-allocator-mips-inl.h  137
-rw-r--r--  deps/v8/src/mips/register-allocator-mips.cc  63
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc  1796
-rw-r--r--  deps/v8/src/mips/simulator-mips.h  210
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc  4203
-rw-r--r--  deps/v8/src/mips/virtual-frame-mips.cc  319
-rw-r--r--  deps/v8/src/mips/virtual-frame-mips.h  548
33 files changed, 29674 insertions, 5262 deletions
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 2e634617c..b5ffe7391 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -38,22 +38,14 @@
#include "mips/assembler-mips.h"
#include "cpu.h"
+#include "debug.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// Condition
-
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != cc_always);
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
@@ -61,17 +53,13 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rmode_ = rmode;
}
+
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
Operand::Operand(Smi* value) {
rm_ = no_reg;
@@ -79,10 +67,12 @@ Operand::Operand(Smi* value) {
rmode_ = RelocInfo::NONE;
}
+
Operand::Operand(Register rm) {
rm_ = rm;
}
+
bool Operand::is_reg() const {
return rm_.is_valid();
}
@@ -90,11 +80,15 @@ bool Operand::is_reg() const {
// -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- // On MIPS we do not use pc relative addressing, so we don't need to patch the
- // code here.
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ byte* p = reinterpret_cast<byte*>(pc_);
+ int count = Assembler::RelocateInternalReference(p, delta);
+ CPU::FlushICache(p, count * sizeof(uint32_t));
+ }
}
@@ -110,6 +104,11 @@ Address RelocInfo::target_address_address() {
}
+int RelocInfo::target_address_size() {
+ return Assembler::kExternalTargetSize;
+}
+
+
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
@@ -130,8 +129,12 @@ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ reconstructed_obj_ptr_ =
+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return &reconstructed_obj_ptr_;
}
@@ -143,23 +146,52 @@ void RelocInfo::set_target_object(Object* target) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ return &reconstructed_adr_ptr_;
+}
+
+
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<JSGlobalPropertyCell>(
+ reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ Object* object = HeapObject::FromAddress(
+ address - JSGlobalPropertyCell::kValueOffset);
+ return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+ Memory::Address_at(pc_) = address;
}
Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ Assembler::set_target_address_at(pc_, target);
}
@@ -169,9 +201,8 @@ Object* RelocInfo::call_object() {
Object** RelocInfo::call_object_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
@@ -182,18 +213,80 @@ void RelocInfo::set_call_object(Object* target) {
bool RelocInfo::IsPatchedReturnSequence() {
-#ifdef DEBUG
- PrintF("%s - %d - %s : Checking for jal(r)",
- __FILE__, __LINE__, __func__);
+ Instr instr0 = Assembler::instr_at(pc_);
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+ bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+ (instr1 & kOpcodeMask) == ORI &&
+ (instr2 & kOpcodeMask) == SPECIAL &&
+ (instr2 & kFunctionFieldMask) == JALR);
+ return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Object** p = target_object_address();
+ Object* orig = *p;
+ visitor->VisitPointer(p);
+ if (*p != orig) {
+ set_target_object(*p);
+ }
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
#endif
- return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
- (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
- ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(heap, target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(heap, this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
}
// -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
void Assembler::CheckBuffer() {
@@ -203,10 +296,20 @@ void Assembler::CheckBuffer() {
}
+void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+}
+
+
void Assembler::emit(Instr x) {
- CheckBuffer();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
}
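
Note on the hunk above: the quick check keeps emit() cheap by comparing pc_offset() against a precomputed threshold and only entering the full trampoline-pool logic once that threshold is crossed. A minimal standalone C++ sketch of that pattern (simplified bookkeeping and made-up constants; not the V8 code itself):

    #include <cstdio>

    // Hypothetical miniature of the assembler's pool bookkeeping.
    struct MiniAssembler {
      int pc_offset = 0;             // Bytes emitted so far.
      int next_buffer_check = 64;    // Next pc_offset that triggers a check.
      int pool_checks = 0;

      void CheckTrampolinePool() {   // The expensive, rarely-taken path.
        pool_checks++;
        next_buffer_check += 64;     // Push the threshold out again.
      }

      void CheckTrampolinePoolQuick() {
        // Cheap comparison on the hot path.
        if (pc_offset >= next_buffer_check) CheckTrampolinePool();
      }

      void emit() {                  // One 4-byte instruction (kInstrSize).
        pc_offset += 4;
        CheckTrampolinePoolQuick();
      }
    };

    int main() {
      MiniAssembler a;
      for (int i = 0; i < 100; i++) a.emit();
      std::printf("pool checked %d times over %d bytes\n",
                  a.pool_checks, a.pc_offset);
      return 0;
    }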
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a3b316b14..4ca6a91aa 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -40,82 +40,42 @@
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
-
namespace v8 {
namespace internal {
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
+#if !defined(__mips__)
+ // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+ if (FLAG_enable_fpu) {
+ supported_ |= 1u << FPU;
+ }
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+ if (OS::MipsCpuHasFeature(FPU)) {
+ // This implementation also sets the FPU flags if
+ // runtime detection of FPU returns true.
+ supported_ |= 1u << FPU;
+ found_by_runtime_probing_ |= 1u << FPU;
+ }
+#endif
+}
-const Register no_reg = { -1 };
-
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
-
-
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 };
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 };
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 };
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
int ToNumber(Register reg) {
ASSERT(reg.is_valid());
@@ -156,6 +116,7 @@ int ToNumber(Register reg) {
return kNumbers[reg.code()];
}
+
Register ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
@@ -179,7 +140,16 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on MIPS means that it is a lui/ori instruction, and that is
+ // always the case inside code objects.
+ return true;
+}
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
@@ -210,7 +180,7 @@ Operand::Operand(Handle<Object> handle) {
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -221,26 +191,64 @@ Operand::Operand(Handle<Object> handle) {
}
}
-MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
+// Specific instructions, constants, and masks.
+
+static const int kNegOffset = 0x00008000;
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+// sw(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+// lw(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+
+const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -263,17 +271,29 @@ Assembler::Assembler(void* buffer, int buffer_size) {
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = false;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+
+ ast_id_for_reloc_info_ = kNoASTId;
}
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -282,7 +302,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -291,6 +311,139 @@ void Assembler::GetCode(CodeDesc* desc) {
}
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CodeTargetAlign() {
+ // No advantage to aligning branch/call targets to more than
+ // single instruction, that I am aware of.
+ Align(4);
+}
+
+
+Register Assembler::GetRtReg(Instr instr) {
+ Register rt;
+ rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+ return rt;
+}
+
+
+Register Assembler::GetRsReg(Instr instr) {
+ Register rs;
+ rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+ Register rd;
+ rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+ return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+ return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+ return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+ return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+ return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+ return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+ return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+ return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetFunction(Instr instr) {
+ return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+ return instr & kFunctionFieldMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+ return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+ return instr & ~kImm16Mask;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+ return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+ return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kLwRegFpNegOffsetPattern);
+}
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -301,14 +454,21 @@ void Assembler::GetCode(CodeDesc* desc) {
// to be generated; pos() is the position of the last
// instruction using the label.
+// The link chain is terminated by a value in the instruction of -1,
+// which is an otherwise illegal value (branch -1 is inf loop).
+// The instruction 16-bit offset field addresses 32-bit words, but in
+// code is conv to an 18-bit value addressing bytes, hence the -4 value.
-// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
-bool Assembler::is_branch(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt_field = ((instr & kRtFieldMask));
- uint32_t rs_field = ((instr & kRsFieldMask));
+
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ uint32_t label_constant = GetLabelConst(instr);
// Checks if the instruction is a branch.
return opcode == BEQ ||
opcode == BNE ||
@@ -317,10 +477,130 @@ bool Assembler::is_branch(Instr instr) {
opcode == BEQL ||
opcode == BNEL ||
opcode == BLEZL ||
- opcode == BGTZL||
+ opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+
+bool Assembler::IsBeq(Instr instr) {
+ return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+ return GetOpcodeField(instr) == BNE;
+}
+
+
+bool Assembler::IsJump(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rd_field = GetRdField(instr);
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J || opcode == JAL ||
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == ORI;
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ ASSERT(type < 32);
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt = GetRt(instr);
+ uint32_t rs = GetRs(instr);
+ uint32_t sa = GetSa(instr);
+
+ // nop(type) == sll(zero_reg, zero_reg, type);
+ // Technically all these values will be 0 but
+ // this makes more sense to the reader.
+
+ bool ret = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ sa == type);
+
+ return ret;
+}
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+ ASSERT(IsBranch(instr));
+ return ((int16_t)(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+ return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+ ASSERT(IsLw(instr));
+ return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsLw(instr));
+
+ // We actually create a new lw instruction based on the original one.
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+ | (offset & kImm16Mask);
+
+ return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+ return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsSw(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+ ASSERT(IsAddImmediate(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAndImmediate(Instr instr) {
+ return GetOpcodeField(instr) == ANDI;
}
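
The Get* accessors added above are plain shift-and-mask reads of the fixed MIPS32 field layout. A standalone sketch of the same decoding (field positions are architectural; the sample word is an ordinary addu chosen only for illustration):

    #include <cstdint>
    #include <cstdio>

    // Field positions per the MIPS32 instruction encoding (architectural,
    // not V8-specific values).
    const int kRsShift = 21, kRtShift = 16, kRdShift = 11, kSaShift = 6;

    uint32_t GetOpcode(uint32_t instr)   { return instr >> 26; }
    uint32_t GetRs(uint32_t instr)       { return (instr >> kRsShift) & 0x1f; }
    uint32_t GetRt(uint32_t instr)       { return (instr >> kRtShift) & 0x1f; }
    uint32_t GetRd(uint32_t instr)       { return (instr >> kRdShift) & 0x1f; }
    uint32_t GetSa(uint32_t instr)       { return (instr >> kSaShift) & 0x1f; }
    uint32_t GetFunction(uint32_t instr) { return instr & 0x3f; }

    int main() {
      uint32_t addu = 0x00851021;  // addu v0, a0, a1 (an ordinary R-type word).
      std::printf("opcode=%u rs=%u rt=%u rd=%u sa=%u funct=0x%x\n",
                  (unsigned)GetOpcode(addu), (unsigned)GetRs(addu),
                  (unsigned)GetRt(addu), (unsigned)GetRd(addu),
                  (unsigned)GetSa(addu), (unsigned)GetFunction(addu));
      return 0;
    }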
@@ -328,16 +608,55 @@ int Assembler::target_at(int32_t pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
}
- // Check we have a branch instruction.
- ASSERT(is_branch(instr));
+ // Check we have a branch or jump instruction.
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
- int32_t imm18 = ((instr &
- static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ if (IsBranch(instr)) {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return pos + kBranchPCOffset + imm18;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t delta = instr_address - imm;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ } else {
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int32_t delta = instr_address - imm28;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
+ }
}
@@ -351,15 +670,41 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
return;
}
- ASSERT(is_branch(instr));
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ if (IsBranch(instr)) {
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
+
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ } else {
+ uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ }
}
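
The lui/ori branch of target_at()/target_at_put() above treats the pair's two 16-bit immediates as the halves of a 32-bit absolute address. A standalone sketch of that read/patch round trip (mask and shift values mirror the MIPS encoding; this is an illustration, not the assembler code):

    #include <cassert>
    #include <cstdint>

    const uint32_t kImm16Mask = 0xffff;
    const uint32_t kHiMask = 0xffff0000;
    const int kLuiShift = 16;

    // Reassemble the 32-bit address carried by a lui/ori pair.
    uint32_t ReadLuiOriAddress(uint32_t instr_lui, uint32_t instr_ori) {
      return ((instr_lui & kImm16Mask) << kLuiShift) | (instr_ori & kImm16Mask);
    }

    // Rewrite both immediates so the pair now loads 'target'.
    void PatchLuiOriAddress(uint32_t* instr_lui, uint32_t* instr_ori,
                            uint32_t target) {
      *instr_lui = (*instr_lui & ~kImm16Mask) | ((target & kHiMask) >> kLuiShift);
      *instr_ori = (*instr_ori & ~kImm16Mask) | (target & kImm16Mask);
    }

    int main() {
      uint32_t lui = 0x3c080000;  // lui t0, 0
      uint32_t ori = 0x35080000;  // ori t0, t0, 0
      PatchLuiOriAddress(&lui, &ori, 0x12345678);
      assert(ReadLuiOriAddress(lui, ori) == 0x12345678);
      return 0;
    }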
@@ -388,11 +733,34 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
+ int32_t dist = pos - fixup_pos;
+ next(L); // Call next before overwriting link with target at fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+ target_at_put(fixup_pos, trampoline_pos);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
+ }
+ target_at_put(fixup_pos, pos);
+ } else {
+ ASSERT(IsJ(instr) || IsLui(instr));
+ target_at_put(fixup_pos, pos);
+ }
}
L->bind_to(pos);
@@ -403,29 +771,8 @@ void Assembler::bind_to(Label* L, int pos) {
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
+ ASSERT(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
@@ -433,26 +780,27 @@ void Assembler::bind(Label* L) {
void Assembler::next(Label* L) {
ASSERT(L->is_linked());
int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
+ ASSERT(link > 0 || link == kEndOfChain);
+ if (link == kEndOfChain) {
L->Unuse();
+ } else if (link > 0) {
+ L->link_to(link);
}
}
+bool Assembler::is_near(Label* L) {
+ if (L->is_bound()) {
+ return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ }
+ return false;
+}
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- return Serializer::enabled();
- } else if (rmode == RelocInfo::NONE) {
- return false;
- }
- return true;
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return rmode != RelocInfo::NONE;
}
@@ -470,14 +818,28 @@ void Assembler::GenInstrRegister(Opcode opcode,
void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func) {
+ ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (msb << kRdShift) | (lsb << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
- | (fd.code() << 6) | func;
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+ | (fd.code() << kFdShift) | func;
emit(instr);
}
@@ -489,8 +851,22 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func) {
+ ASSERT(fs.is_valid() && rt.is_valid());
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ Instr instr =
+ opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
}
@@ -523,35 +899,85 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
}
-// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
ASSERT(is_uint26(address));
Instr instr = opcode | address;
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+
+uint32_t Assembler::jump_address(Label* L) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ return imm;
}
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
+
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos();
+ L->link_to(pc_offset());
} else {
- target_pos = kEndOfChain;
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
}
- L->link_to(pc_offset());
}
int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ ASSERT((offset & 3) == 0);
+ ASSERT(is_int16(offset >> 2));
+
return offset;
}
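
branch_offset() above returns a byte offset relative to the instruction after the branch, which the emitters store as a 16-bit word count. A standalone sketch of that arithmetic (kBranchPCOffset == 4 is assumed here, matching the alignment and is_int16 asserts above):

    #include <cassert>
    #include <cstdint>

    const int kBranchPCOffset = 4;

    // Encode the 16-bit branch immediate for a branch at byte offset
    // 'branch_pc' targeting byte offset 'target_pos'.
    int16_t EncodeBranchOffset(int branch_pc, int target_pos) {
      int offset = target_pos - (branch_pc + kBranchPCOffset);
      assert((offset & 3) == 0);                    // Word-aligned targets.
      int imm16 = offset >> 2;                      // Stored as a word count.
      assert(imm16 >= -32768 && imm16 <= 32767);    // Must fit in 16 bits.
      return static_cast<int16_t>(imm16);
    }

    int main() {
      // A branch at byte 100 targeting byte 120 encodes 4
      // (four words past the delay slot).
      assert(EncodeBranchOffset(100, 120) == 4);
      return 0;
    }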
@@ -560,14 +986,24 @@ void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ ASSERT((imm18 & 3) == 0);
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
} else {
target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
}
L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
}
}
@@ -580,47 +1016,66 @@ void Assembler::b(int16_t offset) {
void Assembler::bal(int16_t offset) {
+ positions_recorder()->WriteRecordedPositions();
bgezal(zero_reg, offset);
}
void Assembler::beq(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgezal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgtz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::blez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bne(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BNE, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -631,18 +1086,27 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::jal(int32_t target) {
+ positions_recorder()->WriteRecordedPositions();
ASSERT(is_uint28(target) && ((target & 3) == 0));
GenInstrJump(JAL, target >> 2);
}
void Assembler::jalr(Register rs, Register rd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -650,31 +1114,16 @@ void Assembler::jalr(Register rs, Register rd) {
// Arithmetic.
-void Assembler::add(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
-}
-
-
void Assembler::addu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
-void Assembler::addi(Register rd, Register rs, int32_t j) {
- GenInstrImmediate(ADDI, rs, rd, j);
-}
-
-
void Assembler::addiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(ADDIU, rs, rd, j);
}
-void Assembler::sub(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
-}
-
-
void Assembler::subu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
@@ -743,7 +1192,15 @@ void Assembler::nor(Register rd, Register rs, Register rt) {
// Shifts.
-void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+void Assembler::sll(Register rd,
+ Register rt,
+ uint16_t sa,
+ bool coming_from_nop) {
+ // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+ // generated using the sll instruction. They must be generated using
+ // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+ // instructions.
+ ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}
@@ -773,30 +1230,134 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
}
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ emit(instr);
+}
+
+
//------------Memory-instructions-------------
+// Helper for base-reg + offset, when offset is larger than int16.
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ ASSERT(!src.rm().is(at));
+ lui(at, src.offset_ >> kLuiShift);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm()); // Add base register.
+}
+
+
void Assembler::lb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ }
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ }
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
void Assembler::sb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ }
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
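
The large-offset paths above first materialize the full 32-bit offset in the 'at' scratch register with lui/ori, then issue a zero-offset access. A standalone check (illustrative only) that the lui/ori split round-trips for any 32-bit offset, including negative ones, because ori zero-extends its immediate:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t tests[] = {0, 4, 0x12345678, -4, -0x70000000, 0x7fffffff};
      for (int32_t offset : tests) {
        uint32_t lui_imm = (static_cast<uint32_t>(offset) >> 16) & 0xffff;
        uint32_t ori_imm = static_cast<uint32_t>(offset) & 0xffff;
        uint32_t at = (lui_imm << 16) | ori_imm;  // Value left in 'at'.
        assert(at == static_cast<uint32_t>(offset));
      }
      std::printf("lui/ori offset split round-trips\n");
      return 0;
    }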
@@ -808,13 +1369,37 @@ void Assembler::lui(Register rd, int32_t j) {
//-------------Misc-instructions--------------
// Break / Trap instructions.
-void Assembler::break_(uint32_t code) {
+void Assembler::break_(uint32_t code, bool break_as_stop) {
ASSERT((code & ~0xfffff) == 0);
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+ // See constants-mips.h for explanation.
+ ASSERT((break_as_stop &&
+ code <= kMaxStopCode &&
+ code > kMaxWatchpointCode) ||
+ (!break_as_stop &&
+ (code > kMaxStopCode ||
+ code <= kMaxWatchpointCode)));
Instr break_instr = SPECIAL | BREAK | (code << 6);
emit(break_instr);
}
+void Assembler::stop(const char* msg, uint32_t code) {
+ ASSERT(code > kMaxWatchpointCode);
+ ASSERT(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS)
+ break_(0x54321);
+#else // V8_HOST_ARCH_MIPS
+ BlockTrampolinePoolFor(2);
+ // The Simulator will handle the stop instruction and get the message address.
+ // On MIPS stop() is just a special kind of break_().
+ break_(code, true);
+ emit(reinterpret_cast<Instr>(msg));
+#endif
+}
+
+
void Assembler::tge(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
Instr instr = SPECIAL | TGE | rs.code() << kRsShift
@@ -841,7 +1426,8 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+ Instr instr =
+ SPECIAL | TLTU | rs.code() << kRsShift
| rt.code() << kRtShift | code << 6;
emit(instr);
}
@@ -896,6 +1482,54 @@ void Assembler::sltiu(Register rt, Register rs, int32_t j) {
}
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 1;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0007) << 2 | 0;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ins.
+ // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ext.
+ // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
+
+
//--------Coprocessor-instructions----------------
// Load, store, move.
@@ -905,7 +1539,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // load to two 32-bit loads.
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
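
The ldc1 replacement above avoids a 64-bit FPU load because HeapNumbers are only guaranteed 4-byte alignment; the sdc1 counterpart in the next hunk does the same for stores. A standalone illustration (not V8 code) that a double at a merely 4-byte-aligned address can always be moved as two 32-bit words at offset and offset + 4:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Simulated 4-byte-aligned storage, as for an unaligned HeapNumber field.
      uint32_t storage[3] = {0, 0, 0};
      double value = 1.25;
      std::memcpy(&storage[1], &value, sizeof(value));  // "sdc1" as two words.

      uint32_t words[2] = {storage[1], storage[2]};     // "ldc1" as two words.
      double readback;
      std::memcpy(&readback, words, sizeof(readback));
      std::printf("readback=%g\n", readback);
      return 0;
    }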
@@ -915,27 +1554,74 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // store to two 32-bit stores.
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
-void Assembler::mtc1(FPURegister fs, Register rt) {
+void Assembler::mtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
-void Assembler::mthc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+void Assembler::mfc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-void Assembler::mfc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MFC1, rt, fs, f0);
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
-void Assembler::mfhc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
@@ -951,22 +1637,107 @@ void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -982,6 +1753,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -993,7 +1765,8 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc) {
+ FPURegister fs, FPURegister ft, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1002,7 +1775,18 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
}
+void Assembler::fcmp(FPURegister src1, const double src2,
+ FPUCondition cond) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ ASSERT(src2 == 0.0);
+ mtc1(zero_reg, f14);
+ cvt_d_w(f14, f14);
+ c(cond, D, src1, f14, 0);
+}
+
+
void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1010,6 +1794,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1018,58 +1803,66 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
// Debugging.
void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
CheckBuffer();
RecordRelocInfo(RelocInfo::JS_RETURN);
}
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
+ if (FLAG_code_comments) {
CheckBuffer();
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
-void Assembler::RecordPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ ASSERT(IsJ(instr) || IsLui(instr));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ ASSERT((imm & 3) == 0);
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
-bool Assembler::WriteRecordedPositions() {
- bool written = false;
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ return 2; // Number of instructions patched.
+ } else {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if ((int32_t)imm28 == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- written = true;
- }
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- written = true;
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
}
-
- // Return whether something was written.
- return written;
}
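
For the J-format case handled in the else branch above, the following standalone C++ sketch (not part of V8; the masks are local stand-ins for kImm26Mask and kImm28Mask, and the example instruction and delta are arbitrary) shows how the 26-bit word index is widened to a 28-bit byte offset, moved by pc_delta, and re-encoded:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kImm26Mask = (1u << 26) - 1;   // Local stand-ins for the masks
      const uint32_t kImm28Mask = (1u << 28) - 1;   // used by the code above.

      // A J-type instruction stores a 26-bit word index; shifting it left by 2
      // gives the 28-bit byte offset within the current 256 MB region.
      uint32_t instr = 0x08000000u | 0x100u;        // Hypothetical j with imm26 = 0x100.
      uint32_t imm28 = (instr & kImm26Mask) << 2;   // 0x400 bytes.

      // Moving the buffer by pc_delta shifts the encoded target by the same amount.
      int pc_delta = 64;
      imm28 = (imm28 + pc_delta) & kImm28Mask;
      assert((imm28 & 3) == 0);

      uint32_t patched = (instr & ~kImm26Mask) | (imm28 >> 2);
      assert((patched & kImm26Mask) == 0x110u);     // (0x400 + 64) >> 2 == 0x110 words.
      return 0;
    }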
@@ -1077,7 +1870,7 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // the new buffer
+ CodeDesc desc; // The new buffer.
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
} else if (buffer_size_ < 1*MB) {
@@ -1085,7 +1878,7 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
// Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -1108,20 +1901,39 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
-
- // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
- // shift by pc_delta. But on MIPS the target address it directly loaded, so
- // we do not need to relocate here.
+ // Relocate runtime entries.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+ RelocateInternalReference(p, pc_delta);
+ }
+ }
ASSERT(!overflow());
}
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
- ASSERT(RelocInfo::IsJSReturn(rmode)
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
@@ -1133,82 +1945,120 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!FLAG_debug_code) {
return;
}
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- reloc_info_writer.Write(&rinfo);
+ ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+ ast_id_for_reloc_info_ = kNoASTId;
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
}
}
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ ASSERT(!trampoline_emitted_);
+ ASSERT(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop();
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32;
+ imm32 = jump_address(&after_pool);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and available
+ // to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+ nop();
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit trampoline once, we need to prevent any
+ // further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ = pc_offset() +
+ kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
+
+
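
When no unbound labels remain, the code above only has to push next_buffer_check_ out to just short of the branch range. A standalone arithmetic sketch (not part of V8; the current pc offset is made up) of that calculation:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;
      const int kTrampolineSlotsSize = 4 * kInstrSize;    // lui, ori, jr, nop per slot.
      const int kMaxBranchOffset = (1 << (18 - 1)) - 1;   // ~128 KB reachable by a branch.

      int pc_offset = 1024;                               // Hypothetical current offset.
      int next_buffer_check =
          pc_offset + kMaxBranchOffset - kTrampolineSlotsSize * 16;
      assert(next_buffer_check == 1024 + 131071 - 256);
      return 0;
    }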
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- // Check we have 2 instructions generated by li.
- ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
- (instr2 & kOpcodeMask) == ORI ||
- (instr2 & kOpcodeMask) == LUI)));
- // Interpret these 2 instructions.
- if (instr1 == nopInstr) {
- if ((instr2 & kOpcodeMask) == ADDI) {
- return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
- } else if ((instr2 & kOpcodeMask) == ORI) {
- return reinterpret_cast<Address>(instr2 & kImm16Mask);
- } else if ((instr2 & kOpcodeMask) == LUI) {
- return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
- }
- } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
- // 32 bits value.
+ // Interpret 2 instructions generated by li: lui/ori
+ if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+ // Assemble the 32 bit value.
return reinterpret_cast<Address>(
- (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+ (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
}
- // We should never get here.
+ // We should never get here, force a bad address if we do.
UNREACHABLE();
return (Address)0x0;
}
void Assembler::set_target_address_at(Address pc, Address target) {
- // On MIPS we need to patch the code to generate.
+ // On MIPS we patch the address into lui/ori instruction pair.
- // First check we have a li.
+ // First check we have an li (lui/ori pair).
Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
Instr instr1 = instr_at(pc);
- // Check we have indeed the result from a li with MustUseAt true.
- CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
- (instr2 & kOpcodeMask)== ORI ||
- (instr2 & kOpcodeMask)== LUI)));
+ // Check we have indeed the result from a li with MustUseReg true.
+ CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif
-
- uint32_t rt_code = (instr2 & kRtFieldMask);
+ uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint32_t itarget = reinterpret_cast<uint32_t>(target);
- if (is_int16(itarget)) {
- // nop
- // addiu rt zero_reg j
- *p = nopInstr;
- *(p+1) = ADDIU | rt_code | (itarget & LOMask);
- } else if (!(itarget & HIMask)) {
- // nop
- // ori rt zero_reg j
- *p = nopInstr;
- *(p+1) = ORI | rt_code | (itarget & LOMask);
- } else if (!(itarget & LOMask)) {
- // nop
- // lui rt (HIMask & itarget)>>16
- *p = nopInstr;
- *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
- } else {
- // lui rt (HIMask & itarget)>>16
- // ori rt rt, (LOMask & itarget)
- *p = LUI | rt_code | ((itarget & HIMask)>>16);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
- }
+ // lui rt, high-16.
+ // ori rt, rt, low-16.
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
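
As a worked illustration of the lui/ori encoding read by target_address_at and written by set_target_address_at above, this standalone C++ sketch (not part of V8; the constants are local stand-ins for kHiMask, kLuiShift and kImm16Mask, and the target value is arbitrary) splits and reassembles a 32-bit address the same way:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kImm16Mask = 0x0000ffff;   // Local stand-ins for the masks and
      const uint32_t kHiMask    = 0xffff0000;   // shift used by the code above.
      const int      kLuiShift  = 16;

      uint32_t target = 0x12345678;             // Arbitrary example address.

      // Encoding, as in set_target_address_at: lui takes the upper half, ori the lower.
      uint32_t lui_imm = (target & kHiMask) >> kLuiShift;   // 0x1234
      uint32_t ori_imm = target & kImm16Mask;               // 0x5678

      // Decoding, as in target_address_at: shift the lui immediate back up and
      // OR in the ori immediate.
      uint32_t decoded = (lui_imm << 16) | ori_imm;
      assert(decoded == target);
      return 0;
    }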
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index a687c2b8f..92c958b96 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -41,8 +41,6 @@
#include "constants-mips.h"
#include "serialize.h"
-using namespace assembler::mips;
-
namespace v8 {
namespace internal {
@@ -69,10 +67,49 @@ namespace internal {
// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
// Core register.
struct Register {
+ static const int kNumRegisters = v8::internal::kNumRegisters;
+ static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kSizeInBytes = 4;
+
+ static int ToAllocationIndex(Register reg) {
+ return reg.code() - 2; // zero_reg and 'at' are skipped.
+ }
+
+ static Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code(index + 2); // zero_reg and 'at' are skipped.
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "v0",
+ "v1",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "t7",
+ };
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = { code };
+ return r;
+ }
+
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
@@ -88,40 +125,41 @@ struct Register {
int code_;
};
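
The allocation-index mapping added to Register above skips zero_reg and at, so allocatable codes 2 (v0) through 15 (t7) map onto indices 0 through 13. A standalone sketch (not part of V8) of that mapping:

    #include <cassert>

    int ToAllocationIndex(int code)    { return code - 2; }   // zero_reg (0) and at (1) are skipped.
    int FromAllocationIndex(int index) { return index + 2; }

    int main() {
      assert(ToAllocationIndex(2)  == 0);    // v0 is the first allocatable register.
      assert(ToAllocationIndex(15) == 13);   // t7 is the last; kNumAllocatableRegisters == 14.
      assert(FromAllocationIndex(5) == 7);   // Index 5 maps back to a3 (code 7).
      return 0;
    }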
-extern const Register no_reg;
-
-extern const Register zero_reg;
-extern const Register at;
-extern const Register v0;
-extern const Register v1;
-extern const Register a0;
-extern const Register a1;
-extern const Register a2;
-extern const Register a3;
-extern const Register t0;
-extern const Register t1;
-extern const Register t2;
-extern const Register t3;
-extern const Register t4;
-extern const Register t5;
-extern const Register t6;
-extern const Register t7;
-extern const Register s0;
-extern const Register s1;
-extern const Register s2;
-extern const Register s3;
-extern const Register s4;
-extern const Register s5;
-extern const Register s6;
-extern const Register s7;
-extern const Register t8;
-extern const Register t9;
-extern const Register k0;
-extern const Register k1;
-extern const Register gp;
-extern const Register sp;
-extern const Register s8_fp;
-extern const Register ra;
+const Register no_reg = { -1 };
+
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
int ToNumber(Register reg);
@@ -129,7 +167,50 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
+ static const int kNumRegisters = v8::internal::kNumFPURegisters;
+ // f0 has been excluded from allocation. This is following ia32
+ // where xmm0 is excluded.
+ static const int kNumAllocatableRegisters = 15;
+
+ static int ToAllocationIndex(FPURegister reg) {
+ ASSERT(reg.code() != 0);
+ ASSERT(reg.code() % 2 == 0);
+ return (reg.code() / 2) - 1;
+ }
+
+ static FPURegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code((index + 1) * 2);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26",
+ "f28",
+ "f30"
+ };
+ return names[index];
+ }
+
+ static FPURegister from_code(int code) {
+ FPURegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
@@ -139,84 +220,74 @@ struct FPURegister {
ASSERT(is_valid());
return 1 << code_;
}
-
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
// Unfortunately we can't make this private in a struct.
int code_;
};
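
The FPURegister mapping above allocates only even-numbered registers as doubles and excludes f0, so code 2 (f2) through code 30 (f30) map onto indices 0 through 14. A standalone sketch (not part of V8) of that mapping:

    #include <cassert>

    int ToAllocationIndex(int code)    { return (code / 2) - 1; }   // Even doubles only; f0 excluded.
    int FromAllocationIndex(int index) { return (index + 1) * 2; }

    int main() {
      assert(ToAllocationIndex(2)  == 0);     // f2 is the first allocatable double register.
      assert(ToAllocationIndex(30) == 14);    // f30 is the last; kNumAllocatableRegisters == 15.
      assert(FromAllocationIndex(6) == 14);   // Index 6 maps back to f14.
      return 0;
    }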
-extern const FPURegister no_creg;
-
-extern const FPURegister f0;
-extern const FPURegister f1;
-extern const FPURegister f2;
-extern const FPURegister f3;
-extern const FPURegister f4;
-extern const FPURegister f5;
-extern const FPURegister f6;
-extern const FPURegister f7;
-extern const FPURegister f8;
-extern const FPURegister f9;
-extern const FPURegister f10;
-extern const FPURegister f11;
-extern const FPURegister f12; // arg
-extern const FPURegister f13;
-extern const FPURegister f14; // arg
-extern const FPURegister f15;
-extern const FPURegister f16;
-extern const FPURegister f17;
-extern const FPURegister f18;
-extern const FPURegister f19;
-extern const FPURegister f20;
-extern const FPURegister f21;
-extern const FPURegister f22;
-extern const FPURegister f23;
-extern const FPURegister f24;
-extern const FPURegister f25;
-extern const FPURegister f26;
-extern const FPURegister f27;
-extern const FPURegister f28;
-extern const FPURegister f29;
-extern const FPURegister f30;
-extern const FPURegister f31;
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
-
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-enum Hint {
- no_hint = 0
+typedef FPURegister DoubleRegister;
+
+const FPURegister no_creg = { -1 };
+
+const FPURegister f0 = { 0 }; // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ bool is_valid() const { return code_ == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int code_;
};
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
+const FPUControlRegister FCSR = { kFCSRRegister };
// -----------------------------------------------------------------------------
@@ -245,7 +316,7 @@ class Operand BASE_EMBEDDED {
private:
Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg
+ int32_t imm32_; // Valid if rm_ == no_reg.
RelocInfo::Mode rmode_;
friend class Assembler;
@@ -257,17 +328,119 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
-
- explicit MemOperand(Register rn, int16_t offset = 0);
+ explicit MemOperand(Register rn, int32_t offset = 0);
+ int32_t offset() const { return offset_; }
private:
- int16_t offset_;
+ int32_t offset_;
friend class Assembler;
};
-class Assembler : public Malloced {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ if (f == FPU && !FLAG_enable_fpu) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+
+#ifdef DEBUG
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
+ }
+#endif
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+
+ public:
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ unsigned old_enabled_;
+#else
+
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
+ private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
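
CpuFeatures boils down to two bitmasks: Probe() records what the hardware supports, and a Scope temporarily sets the same bit in the per-isolate enabled mask so that debug-mode IsEnabled() checks pass. A standalone sketch (not part of V8; CpuFeature here is a one-member stand-in for the real enum):

    #include <cassert>

    enum CpuFeature { FPU = 0 };   // Illustrative stand-in for the real enum.

    int main() {
      unsigned supported = 1u << FPU;   // What Probe() would record on FPU hardware.
      unsigned enabled   = 0;           // Per-isolate mask checked by IsEnabled().

      assert((supported & (1u << FPU)) != 0);   // IsSupported(FPU).

      // Entering CpuFeatures::Scope(FPU) saves the old mask and sets the bit.
      unsigned old_enabled = enabled;
      enabled |= 1u << FPU;
      assert((enabled & (1u << FPU)) != 0);     // IsEnabled(FPU) now holds.

      // Leaving the scope restores the previous mask.
      enabled = old_enabled;
      assert((enabled & (1u << FPU)) == 0);
      return 0;
    }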
+
+
+class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -282,9 +455,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -304,10 +480,13 @@ class Assembler : public Malloced {
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
- void bind(Label* L); // binds an unbound label L to the current code position
+ void bind(Label* L); // Binds an unbound label L to current code position.
+ // Determines if Label is bound and near enough so that branch instruction
+ // can be used to reach it, instead of jump instruction.
+ bool is_near(Label* L);
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
@@ -315,17 +494,12 @@ class Assembler : public Malloced {
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ uint32_t jump_address(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Difference between address of current opcode and target address offset.
- static const int kBranchPCOffset = 4;
-
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
@@ -344,8 +518,25 @@ class Assembler : public Malloced {
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = 3 * kPointerSize;
- static const int kExternalTargetSize = 3 * kPointerSize;
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Difference between address of current opcode and target address offset.
+ static const int kBranchPCOffset = 4;
+
+ // Here we are patching the address in the LUI/ORI instruction pair.
+ // These values are used in the serialization process and must be zero for
+ // the MIPS platform, as Code, Embedded Object or External-reference pointers
+ // are split across two consecutive instructions and don't exist separately
+ // in the code, so the serializer should not step forwards in memory after
+ // a target is resolved and written.
+ static const int kCallTargetSize = 0 * kInstrSize;
+ static const int kExternalTargetSize = 0 * kInstrSize;
+
+ // Number of consecutive instructions used to store 32bit constant.
+ // Used in RelocInfo::target_address_address() function to tell serializer
+ // address of the instruction that follows LUI/ORI instruction pair.
+ static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -353,19 +544,56 @@ class Assembler : public Malloced {
// Distance between start of patched return sequence and the emitted address
// to jump to.
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+ static const int kPatchReturnSequenceAddressOffset = 0;
// Distance between start of patched debug break slot and the emitted address
// to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 4;
+
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceInstructions = 7;
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
+
// ---------------------------------------------------------------------------
// Code generation.
- void nop() { sll(zero_reg, zero_reg, 0); }
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ };
+
+ // Type == 0 is the default non-marking type.
+ void nop(unsigned int type = 0) {
+ ASSERT(type < 32);
+ sll(zero_reg, zero_reg, type, true);
+ }
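
A marking nop is just sll zero_reg, zero_reg, type, so the marker survives in the 5-bit shift-amount (sa) field of an instruction with no architectural effect. A standalone sketch (not part of V8; the field constants are spelled out locally) of that encoding:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kSaShift = 6;                 // sa occupies bits 6..10 of an R-type instruction.
      const uint32_t kSaMask  = 0x1fu << kSaShift;

      uint32_t type = 1;                           // e.g. DEBUG_BREAK_NOP from the enum above.
      // sll with rd == rt == zero_reg: opcode, rs, rt, rd and funct are all zero,
      // so the whole word is just the marker type in the sa field.
      uint32_t instr = type << kSaShift;

      uint32_t decoded = (instr & kSaMask) >> kSaShift;
      assert(decoded == type);
      assert(decoded < 32);                        // Mirrors the ASSERT(type < 32) in nop().
      return 0;
    }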
- //------- Branch and jump instructions --------
+ // --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
void b(Label* L) { b(branch_offset(L, false)>>2); }
@@ -388,7 +616,7 @@ class Assembler : public Malloced {
}
// Never use the int16_t b(l)cond version with a branch offset
- // instead of using the Label* version. See Twiki for infos.
+ // instead of using the Label* version.
// Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
void j(int32_t target);
@@ -400,9 +628,7 @@ class Assembler : public Malloced {
//-------Data-processing-instructions---------
// Arithmetic.
- void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
- void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
@@ -410,7 +636,6 @@ class Assembler : public Malloced {
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
- void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
@@ -425,27 +650,40 @@ class Assembler : public Malloced {
void lui(Register rd, int32_t j);
// Shifts.
- void sll(Register rd, Register rt, uint16_t sa);
+ // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
+ // and may cause problems in normal code. coming_from_nop makes sure this
+ // doesn't happen.
+ void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
+ void rotr(Register rd, Register rt, uint16_t sa);
+ void rotrv(Register rd, Register rt, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
+ void lh(Register rd, const MemOperand& rs);
+ void lhu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
+ void lwl(Register rd, const MemOperand& rs);
+ void lwr(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
+ void sh(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
+ void swl(Register rd, const MemOperand& rs);
+ void swr(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
// Break / Trap instructions.
- void break_(uint32_t code);
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(const char* msg, uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -463,6 +701,16 @@ class Assembler : public Malloced {
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
+ // Conditional move.
+ void movz(Register rd, Register rs, Register rt);
+ void movn(Register rd, Register rs, Register rt);
+ void movt(Register rd, Register rs, uint16_t cc = 0);
+ void movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ void clz(Register rd, Register rs);
+ void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
//--------Coprocessor-instructions----------------
@@ -473,19 +721,44 @@ class Assembler : public Malloced {
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
- // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
- // executed first, followed by the MTHC1.
- void mtc1(FPURegister fs, Register rt);
- void mthc1(FPURegister fs, Register rt);
- void mfc1(FPURegister fs, Register rt);
- void mfhc1(FPURegister fs, Register rt);
+ void mtc1(Register rt, FPURegister fs);
+ void mfc1(Register rt, FPURegister fs);
+
+ void ctc1(Register rt, FPUControlRegister fs);
+ void cfc1(Register rt, FPUControlRegister fs);
+
+ // Arithmetic.
+ void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_d(FPURegister fd, FPURegister fs);
+ void mov_d(FPURegister fd, FPURegister fs);
+ void neg_d(FPURegister fd, FPURegister fs);
+ void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
+ void trunc_w_s(FPURegister fd, FPURegister fs);
+ void trunc_w_d(FPURegister fd, FPURegister fs);
+ void round_w_s(FPURegister fd, FPURegister fs);
+ void round_w_d(FPURegister fd, FPURegister fs);
+ void floor_w_s(FPURegister fd, FPURegister fs);
+ void floor_w_d(FPURegister fd, FPURegister fs);
+ void ceil_w_s(FPURegister fd, FPURegister fs);
+ void ceil_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
+ void trunc_l_s(FPURegister fd, FPURegister fs);
+ void trunc_l_d(FPURegister fd, FPURegister fs);
+ void round_l_s(FPURegister fd, FPURegister fs);
+ void round_l_d(FPURegister fd, FPURegister fs);
+ void floor_l_s(FPURegister fd, FPURegister fs);
+ void floor_l_d(FPURegister fd, FPURegister fs);
+ void ceil_l_s(FPURegister fd, FPURegister fs);
+ void ceil_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
@@ -503,31 +776,78 @@ class Assembler : public Malloced {
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
-
+ void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Class for scoping postponing the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() {
+ assem_->EndBlockTrampolinePool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() {
+ assem_->EndBlockGrowBuffer();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
// Record a comment relocation entry that can be used by a disassembler.
- // Use --debug_code to enable.
+ // Use --code-comments to enable.
void RecordComment(const char* msg);
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- bool WriteRecordedPositions();
+ static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
int32_t pc_offset() const { return pc_ - buffer_; }
- int32_t current_position() const { return current_position_; }
- int32_t current_statement_position() const {
- return current_statement_position_;
- }
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
@@ -537,12 +857,9 @@ class Assembler : public Malloced {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- protected:
- int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- void instr_at_put(byte* pc, Instr instr) {
+ static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -551,7 +868,64 @@ class Assembler : public Malloced {
}
// Check if an instruction is a branch of some kind.
- bool is_branch(Instr instr);
+ static bool IsBranch(Instr instr);
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsJ(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsOri(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+ static bool IsPop(Instr instr);
+ static bool IsPush(Instr instr);
+ static bool IsLwRegFpOffset(Instr instr);
+ static bool IsSwRegFpOffset(Instr instr);
+ static bool IsLwRegFpNegOffset(Instr instr);
+ static bool IsSwRegFpNegOffset(Instr instr);
+
+ static Register GetRtReg(Instr instr);
+ static Register GetRsReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRt(Instr instr);
+ static uint32_t GetRtField(Instr instr);
+ static uint32_t GetRs(Instr instr);
+ static uint32_t GetRsField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa(Instr instr);
+ static uint32_t GetSaField(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static int32_t GetBranchOffset(Instr instr);
+ static bool IsLw(Instr instr);
+ static int16_t GetLwOffset(Instr instr);
+ static Instr SetLwOffset(Instr instr, int16_t offset);
+
+ static bool IsSw(Instr instr);
+ static Instr SetSwOffset(Instr instr, int16_t offset);
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+
+ void CheckTrampolinePool();
+
+ protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ unsigned ast_id_for_reloc_info_;
+
+ bool emit_debug_code() const { return emit_debug_code_; }
+
+ int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
@@ -560,11 +934,52 @@ class Assembler : public Malloced {
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
- bool MustUseAt(RelocInfo::Mode rmode);
+ bool MustUseReg(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_++;
+ }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const {
+ return internal_trampoline_exception_;
+ }
+
+ bool is_trampoline_emitted() const {
+ return trampoline_emitted_;
+ }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ ASSERT(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ ASSERT(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const {
+ return block_buffer_growth_;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
@@ -585,6 +1000,25 @@ class Assembler : public Malloced {
static const int kGap = 32;
byte* pc_; // The program counter - moves forward.
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -593,16 +1027,11 @@ class Assembler : public Malloced {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Source position information.
- int current_position_;
- int current_statement_position_;
- int written_position_;
- int written_statement_position_;
-
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
+ inline void CheckTrampolinePoolQuick();
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -620,6 +1049,13 @@ class Assembler : public Malloced {
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
@@ -633,6 +1069,12 @@ class Assembler : public Malloced {
FPURegister fd,
SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func = NULLSF);
+
void GenInstrImmediate(Opcode opcode,
Register rs,
@@ -651,15 +1093,96 @@ class Assembler : public Malloced {
void GenInstrJump(Opcode opcode,
uint32_t address);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize.
+ // Space for trampoline slots precedes space for labels. Each label is of one
+ // instruction size, so total amount for labels is equal to
+ // label_count * kInstrSize.
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() {
+ return start_;
+ }
+ int end() {
+ return end_;
+ }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ ASSERT(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
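
A standalone sketch (not part of V8; the pool start and slot count are made-up values) of the bookkeeping take_slot() performs: each slot is kTrampolineSlotsSize bytes, and kInvalidSlotPos is returned once the pool is exhausted:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;
      const int kTrampolineSlotsSize = 4 * kInstrSize;
      const int kInvalidSlotPos = -1;

      int start = 128, slot_count = 2;                // Made-up pool position and size.
      int next_slot = start, free_slots = slot_count;

      auto take_slot = [&]() -> int {
        if (free_slots <= 0) return kInvalidSlotPos;  // Pool exhausted.
        int pos = next_slot;
        free_slots--;
        next_slot += kTrampolineSlotsSize;
        return pos;
      };

      assert(take_slot() == 128);
      assert(take_slot() == 144);
      assert(take_slot() == kInvalidSlotPos);
      return 0;
    }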
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+ // If a trampoline has been emitted, the generated code has grown large. As
+ // this is already a slow case that can potentially break our code generation
+ // for the extreme case, we use this information to switch to a different
+ // mode of branch instruction generation, emitting jump instructions rather
+ // than regular branch instructions.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kInvalidSlotPos = -1;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
+ friend class CodePatcher;
+ friend class BlockTrampolinePoolScope;
+
+ PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
};
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 95329389e..4bb1d8cba 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,8 +31,10 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
@@ -45,32 +47,971 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : called function (only guaranteed when
+ // -- extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * agrc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(a1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToExternalReference expects a0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ Addu(a0, a0, Operand(num_extra_args + 1));
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+
+ __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ lw(result,
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed, handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int initial_capacity,
+ Label* gc_required) {
+ ASSERT(initial_capacity > 0);
+ // Load the initial map from the array function.
+ __ lw(scratch1, FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+ __ AllocateInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(scratch3, zero_reg);
+ __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ Addu(scratch1, result, Operand(JSArray::kSize));
+ __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array (untagged)
+ // scratch2: start of next object
+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+
+ // Fill the FixedArray with the hole value.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ ASSERT(initial_capacity <= kLoopUnfoldLimit);
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < initial_capacity; i++) {
+ __ sw(scratch3, MemOperand(scratch1));
+ __ Addu(scratch1, scratch1, kPointerSize);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole value; otherwise it is left uninitialized. When the
+// backing store is filled, the
+// register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array_storage,
+ Register elements_array_end,
+ Register scratch1,
+ Register scratch2,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ lw(elements_array_storage,
+ FieldMemOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+ // If an empty array is requested allocate a small elements array anyway. This
+ // keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize +
+ FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+ __ AllocateInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch1,
+ gc_required,
+ TAG_OBJECT);
+ __ Branch(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested number of elements.
+ __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ li(elements_array_end,
+ (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+ __ sra(scratch1, array_size, kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_end, scratch1);
+ __ AllocateInNewSpace(
+ elements_array_end,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array_storage: initial map
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kPropertiesOffset));
+ // Field JSArray::kElementsOffset is initialized later.
+ __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // array_size: size of array (smi)
+ __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+ __ sw(elements_array_storage,
+ FieldMemOperand(result, JSArray::kElementsOffset));
+
+ // Clear the heap tag on the elements array.
+ __ And(elements_array_storage,
+ elements_array_storage,
+ Operand(~kHeapObjectTagMask));
+ // Initialize the fixed array and fill it with holes. FixedArray length is
+ // stored as a smi.
+ // result: JSObject
+ // elements_array_storage: elements array (untagged)
+ // array_size: size of array (smi)
+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ // Length of the FixedArray is the number of pre-allocated elements if
+ // the actual JSArray has length 0 and the size of the JSArray for non-empty
+ // JSArrays. The length of a FixedArray is stored as a smi.
+ ASSERT(kSmiTag == 0);
+ __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ movz(array_size, at, array_size);
+
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ sw(array_size, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ // Calculate elements array and elements array end.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // array_size: smi-tagged size of elements array
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements_array_end, elements_array_storage, elements_array_end);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array_storage: elements array element storage
+ // elements_array_end: start of next object
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ sw(scratch1, MemOperand(elements_array_storage));
+ __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+ __ bind(&entry);
+ __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+ }
+}
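
For orientation, the allocation requested above covers the JSArray header, the FixedArray header and one word per element, exactly as the elements_array_end computation builds it up. A host-side sketch of that arithmetic in plain C++, with the 32-bit word counts stated as assumptions rather than values taken from the V8 headers:

    #include <cstddef>

    // Sketch of the size, in words, that AllocateJSArray requests from
    // AllocateInNewSpace. The header word counts are assumed 32-bit values
    // (JSArray::kSize == 16 bytes, FixedArray::kHeaderSize == 8 bytes).
    std::size_t JSArrayAllocationWords(std::size_t element_count) {
      const std::size_t kJSArraySizeWords = 4;       // map, properties, elements, length
      const std::size_t kFixedArrayHeaderWords = 2;  // map, length
      return kJSArraySizeWords + kFixedArrayHeaderWords + element_count;
    }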
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// a0: argc
+// a1: constructor (built-in Array function)
+// ra: return address
+// sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+ Label* call_generic_code) {
+ Counters* counters = masm->isolate()->counters();
+ Label argc_one_or_more, argc_two_or_more;
+
+ // Check for array construction with zero arguments or one.
+ __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ JSArray::kPreallocatedArrayElements,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+ // Setup return value, remove receiver from stack and return.
+ __ mov(v0, a2);
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ Ret();
+
+ // Check for one argument. Bail out if the argument is not a smi or if it
+ // is negative.
+ __ bind(&argc_one_or_more);
+ __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+ ASSERT(kSmiTag == 0);
+ __ lw(a2, MemOperand(sp)); // Get the argument from the stack.
+ __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+ __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
+
+ // Handle construction of an empty array of a certain size. Bail out if size
+ // is too large to actually allocate an elements array.
+ ASSERT(kSmiTag == 0);
+ __ Branch(call_generic_code, Ugreater_equal, a2,
+ Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ true,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+ // Setup return value, remove receiver and argument from stack and return.
+ __ mov(v0, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ __ sll(a2, a0, kSmiTagSize); // Convert argc to a smi.
+
+ // a0: argc
+ // a1: constructor
+ // a2: array_size (smi)
+ // sp[0]: last argument
+ AllocateJSArray(masm,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ false,
+ call_generic_code);
+ __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+ // Fill arguments as array elements. Copy from the top of the stack (last
+ // element) to the array backing store filling it backwards. Note:
+ // elements_array_end points after the backing store.
+ // a0: argc
+ // a3: JSArray
+ // t0: elements_array storage start (untagged)
+ // t1: elements_array_end (untagged)
+ // sp[0]: last argument
+
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ pop(a2);
+ __ Addu(t1, t1, -kPointerSize);
+ __ sw(a2, MemOperand(t1));
+ __ bind(&entry);
+ __ Branch(&loop, lt, t0, Operand(t1));
+
+ // Remove caller arguments and receiver from the stack, setup return value and
+ // return.
+ // a0: argc
+ // a3: JSArray
+ // sp[0]: receiver
+ __ Addu(sp, sp, Operand(kPointerSize));
+ __ mov(v0, a3);
+ __ Ret();
}
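
As a rough illustration of the dispatch ArrayNativeCode performs, here is a sketch in ordinary C++ (not V8 code); kMaxFastLength stands in for JSObject::kInitialMaxFastElementArray and its value below is only an assumption:

    enum class ArrayPath { kEmpty, kHoleFilledOfLength, kFromArguments, kGeneric };

    // argc == 0 builds an empty array, argc == 1 builds a hole-filled array of
    // the given length (bailing out for non-smi, negative or oversized
    // lengths), and argc >= 2 builds the array from the argument list.
    ArrayPath ClassifyArrayCall(int argc, bool arg_is_smi, int arg_value) {
      const int kMaxFastLength = 100000;  // Assumed stand-in value.
      if (argc == 0) return ArrayPath::kEmpty;
      if (argc == 1) {
        if (!arg_is_smi || arg_value < 0 || arg_value >= kMaxFastLength) {
          return ArrayPath::kGeneric;
        }
        return ArrayPath::kHoleFilledOfLength;
      }
      return ArrayPath::kFromArguments;
    }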
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array functions should be maps.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (1)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (2)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->ArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
+ // Initial map for the builtin Array function should be a map.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for Array function (3)",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for Array function (4)",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+ Register function = a1;
+ if (FLAG_debug_code) {
+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+ __ Assert(eq, "Unexpected String function", function, Operand(a2));
+ }
+
+ // Load the first argument in a0 and get rid of the rest.
+ Label no_arguments;
+ __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+ // First args = sp[(argc - 1) * 4].
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ // sp now points to args[0]; drop args[0] and the receiver.
+ __ Drop(2);
+
+ Register argument = a2;
+ Label not_cached, argument_is_string;
+ NumberToStringStub::GenerateLookupNumberStringCache(
+ masm,
+ a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ false, // Is it a Smi?
+ &not_cached);
+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+ __ bind(&argument_is_string);
+
+ // ----------- S t a t e -------------
+ // -- a2 : argument converted to string
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ v0, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
+
+ // Initialising the String Object.
+ Register map = a3;
+ __ LoadGlobalFunctionInitialMap(function, map, t0);
+ if (FLAG_debug_code) {
+ __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+ __ Assert(eq, "Unexpected string wrapper instance size",
+ t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+ __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+ __ Assert(eq, "Unexpected unused properties of string wrapper",
+ t0, Operand(zero_reg));
+ }
+ __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ __ Ret();
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ __ JumpIfSmi(a0, &convert_argument);
+
+ // Is it a String?
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ __ And(t0, a3, Operand(kIsNotStringMask));
+ __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+ __ mov(argument, a0);
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ Branch(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into a2.
+ __ bind(&convert_argument);
+ __ push(function); // Preserve the function.
+ __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ pop(function);
+ __ mov(argument, v0);
+ __ Branch(&argument_is_string);
+
+ // Load the empty string into a2, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ Drop(1);
+ __ Branch(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+ __ EnterInternalFrame();
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ Ret();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that the function is not a smi.
+ __ And(t0, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+ // Check that the function is a JSFunction.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Jump to the function-specific construct stub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(Operand(t9));
+
+ // a0: number of arguments
+ // a1: called object
+ __ bind(&non_function_call);
+ // CALL_NON_FUNCTION_AS_CONSTRUCTOR expects the non-function constructor as
+ // the receiver (instead of the original receiver from the call site). The
+ // receiver is stack element argc.
+ // Set expected number of arguments to zero (not changing a0).
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
+ // Should never count constructions for api objects.
+ ASSERT(!is_api_function || !count_constructions);
+
+ Isolate* isolate = masm->isolate();
+
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
+
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of the
+ // preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc), in which case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to initial
+ // map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ // To allow for truncation.
+ __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ } else {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(t5, 0));
+ __ addiu(t5, t5, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, Uless, t5, Operand(t6));
+ }
+
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with the
+ // allocated object if not; fall through to the runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields and
+ // in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ And(t6,
+ a0,
+ Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+ __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+ __ Addu(a3, a3, Operand(t0));
+ __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+ __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+ __ subu(a3, a3, t0);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
+ // t4: JSObject
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // t4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
+ }
+
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
+
+ // Receiver for constructor call allocated.
+ // t4: JSObject
+ __ bind(&allocated);
+ __ push(t4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
+
+ // Reload the number of arguments from the stack.
+ // a1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+ // Setup pointer to last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below.
+ __ srl(a0, a3, kSmiTagSize);
+
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ Pop();
+
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
+ __ sll(t0, a1, kPointerSizeLog2 - 1);
+ __ Addu(sp, sp, t0);
+ __ Addu(sp, sp, kPointerSize);
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ __ Ret();
+}
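
The tail of the helper implements the usual [[Construct]] result selection; a conceptual sketch of that decision in plain C++ (Value and is_spec_object are illustrative placeholders, not V8 types):

    struct Value { bool is_spec_object; };

    // The constructor's return value replaces the freshly allocated receiver
    // only when it is an object in the ECMA sense (ECMA-262, section 13.2.2-7).
    Value* ConstructResult(Value* allocated_receiver, Value* constructor_result) {
      if (constructor_result != nullptr && constructor_result->is_spec_object) {
        return constructor_result;
      }
      return allocated_receiver;
    }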
+
+
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, false, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ Generate_JSConstructStubHelper(masm, true, false);
}
@@ -78,23 +1019,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
+ // ----------- S t a t e -------------
+ // -- a0: code entry
+ // -- a1: function
+ // -- a2: receiver_pointer
+ // -- a3: argc
+ // -- s0: argv
+ // -----------------------------------
// Clear the context before we push it when entering the JS frame.
- __ li(cp, Operand(0, RelocInfo::NONE));
+ __ mov(cp, zero_reg);
// Enter an internal frame.
__ EnterInternalFrame();
@@ -103,18 +1037,19 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
__ li(s6, Operand(roots_address));
// Push the function and the receiver onto the stack.
- __ MultiPushReversed(a1.bit() | a2.bit());
+ __ Push(a1, a2);
// Copy arguments to the stack in a loop.
// a3: argc
// s0: argv, ie points to first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
- __ add(t2, s0, t0);
+ __ addu(t2, s0, t0);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
@@ -122,48 +1057,30 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
- __ Push(t0); // Push parameter.
+ __ push(t0); // Push parameter.
__ bind(&entry);
- __ Branch(ne, &loop, s0, Operand(t2));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- // s6: roots_address
- //
- // Stack:
- // arguments
- // receiver
- // function
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
+ __ Branch(&loop, ne, s0, Operand(t2));
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
- __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t4);
- __ mov(s2, t4);
- __ mov(s3, t4);
- __ mov(s4, s4);
- __ mov(s5, t4);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x164);
+ __ Call(masm->isolate()->builtins()->JSConstructCall(),
+ RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
__ LeaveInternalFrame();
@@ -182,19 +1099,525 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
+// These functions are called from C++ but cannot be used in live code.
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ __ Abort("Call to unimplemented function in builtins-mips.cc");
+}
+
+
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // 1. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ { Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ push(t2);
+ __ Addu(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call (passed as receiver) from the stack, check
+ // if it is a function.
+ // a0: actual number of arguments
+ Label non_function;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ And(at, a1, Operand(kSmiTagMask));
+ __ Branch(&non_function, eq, at, Operand(zero_reg));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // 3a. Patch the first argument if necessary when calling a function.
+ // a0: actual number of arguments
+ // a1: function
+ Label shift_arguments;
+ { Label convert_to_object, use_global_receiver, patch_receiver;
+ // Change context eagerly in case we need the global receiver.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+ // Do not transform the receiver for native (Compilerhints already in a3).
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ lw(a2, MemOperand(a2, -kPointerSize));
+ // a0: actual number of arguments
+ // a1: function
+ // a2: first argument
+ __ JumpIfSmi(a2, &convert_to_object, t2);
+
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ __ bind(&convert_to_object);
+ __ EnterInternalFrame(); // In order to preserve argument count.
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ __ LeaveInternalFrame();
+ // Restore the function to a1.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a1, MemOperand(at));
+ __ Branch(&patch_receiver);
+
+ // Use the global receiver object from the called function as the
+ // receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a3, sp, at);
+ __ sw(a2, MemOperand(a3, -kPointerSize));
+
+ __ Branch(&shift_arguments);
+ }
+
+ // 3b. Patch the first argument when calling a non-function. The
+ // CALL_NON_FUNCTION builtin expects the non-function callee as
+ // receiver, so overwrite the first argument which will ultimately
+ // become the receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&non_function);
+ // Restore the function in case it has been modified.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+ __ sw(a1, MemOperand(a2, -kPointerSize));
+ // Clear a1 to indicate a non-function being called.
+ __ mov(a1, zero_reg);
+
+ // 4. Shift arguments and return address one slot down on the stack
+ // (overwriting the original receiver). Adjust argument count to make
+ // the original first argument the new receiver.
+ // a0: actual number of arguments
+ // a1: function
+ __ bind(&shift_arguments);
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(a2, sp, at);
+
+ __ bind(&loop);
+ __ lw(at, MemOperand(a2, -kPointerSize));
+ __ sw(at, MemOperand(a2));
+ __ Subu(a2, a2, Operand(kPointerSize));
+ __ Branch(&loop, ne, a2, Operand(sp));
+ // Adjust the actual number of arguments and remove the top element
+ // (which is a copy of the last argument).
+ __ Subu(a0, a0, Operand(1));
+ __ Pop();
+ }
+
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ // a0: actual number of arguments
+ // a1: function
+ { Label function;
+ __ Branch(&function, ne, a1, Operand(zero_reg));
+ __ mov(a2, zero_reg); // expected arguments is 0 for CALL_NON_FUNCTION
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ __ bind(&function);
+ }
+
+ // 5b. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing. If so, jump
+ // (tail-call) to the code entry in register a3 without checking arguments.
+ // a0: actual number of arguments
+ // a1: function
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sra(a2, a2, kSmiTagSize);
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ // Check formal and actual parameter counts.
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+ ParameterCount expected(0);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
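
Step 3a above patches the receiver before the call; a conceptual sketch of that transformation in plain C++ (the enum values are illustrative placeholders, not V8 types):

    enum class Receiver { kUndefinedOrNull, kPrimitive, kSpecObject, kGlobalReceiver };

    // Strict-mode and native callees get the receiver unchanged; otherwise
    // undefined/null are replaced by the global receiver and other primitives
    // are boxed via ToObject.
    Receiver TransformReceiver(Receiver r, bool strict_mode, bool native) {
      if (strict_mode || native) return r;
      if (r == Receiver::kUndefinedOrNull) return Receiver::kGlobalReceiver;
      if (r == Receiver::kPrimitive) return Receiver::kSpecObject;  // ToObject(r).
      return r;
    }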
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ const int kIndexOffset = -5 * kPointerSize;
+ const int kLimitOffset = -4 * kPointerSize;
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ __ EnterInternalFrame();
+
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (e.g. debug break and preemption) here, so the "real stack limit" is
+ // checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a0.
+ __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ lw(a0, MemOperand(fp, kRecvOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Do not transform the receiver for native (Compilerhints already in a2).
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+ // Invoke the function.
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+}
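
The stack check near the top of Generate_FunctionApply compares the free space below sp against the bytes the copied arguments will need, using a signed comparison so an already-overflowed stack also fails. A host-side sketch of the same test (plain C++, not V8 code; the 4-byte pointer size is an assumption for 32-bit MIPS):

    #include <cstddef>
    #include <cstdint>

    // Returns true when argc pointer-sized arguments fit between sp and the
    // real stack limit. Signed arithmetic makes a negative gap (stack already
    // overflowed) fail the check, mirroring the signed Branch above.
    bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                             std::size_t argc) {
      const std::size_t kPointerSize = 4;  // Assumed 32-bit target.
      intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
      intptr_t needed = static_cast<intptr_t>(argc * kPointerSize);
      return space_left > needed;
    }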
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ sll(a0, a0, kSmiTagSize);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+ __ Addu(fp, sp, Operand(3 * kPointerSize));
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+ // then tear down the parameters.
+ __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+ __ mov(sp, fp);
+ __ MultiPop(fp.bit() | ra.bit());
+ __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, t0);
+ // Adjust for the receiver.
+ __ Addu(sp, sp, Operand(kPointerSize));
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x201);
+ // State setup as expected by MacroAssembler::InvokePrologue.
+ // ----------- S t a t e -------------
+ // -- a0: actual arguments count
+ // -- a1: function (passed through to callee)
+ // -- a2: expected arguments count
+ // -- a3: callee code entry
+ // -- t1: call kind information
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ Branch(&dont_adapt_arguments, eq,
+ a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ // We use Uless as the number of arguments should always be greater than 0.
+ __ Branch(&too_few, Uless, a0, Operand(a2));
+
+ { // Enough parameters: actual >= expected.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into a0 and copy end address into a2.
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address.
+ __ sll(a2, a2, kPointerSizeLog2);
+ __ subu(a2, a0, a2);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: copy end address
+ // a3: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0));
+ __ push(t0);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ addiu(a0, a0, -kPointerSize); // In delay slot.
+
+ __ jmp(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected.
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // TODO(MIPS): Optimize these loops.
+
+ // Calculate copy start address into a0 and copy end address is fp.
+ // a0: actual number of arguments as a smi
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a0, fp, a0);
+ // Adjust for return address and receiver.
+ __ Addu(a0, a0, Operand(2 * kPointerSize));
+ // Compute copy end address. Also adjust for return address.
+ __ Addu(t3, fp, kPointerSize);
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // a0: copy start address
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ // t3: copy end address
+ Label copy;
+ __ bind(&copy);
+ __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
+ __ push(t0);
+ __ Subu(a0, a0, kPointerSize);
+ __ Branch(&copy, ne, a0, Operand(t3));
+
+ // Fill the remaining expected arguments with undefined.
+ // a1: function
+ // a2: expected number of arguments
+ // a3: code entry to call
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sll(t2, a2, kPointerSizeLog2);
+ __ Subu(a2, fp, Operand(t2));
+ __ Addu(a2, a2, Operand(-4 * kPointerSize)); // Adjust for frame.
+
+ Label fill;
+ __ bind(&fill);
+ __ push(t0);
+ __ Branch(&fill, ne, sp, Operand(a2));
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+
+ __ Call(a3);
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Ret();
+
+
+ // -------------------------------------------
+ // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ Jump(a3);
}
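
Seen from the callee, the adaptor frame built above guarantees exactly the expected number of arguments; a conceptual sketch of that effect in plain C++ (not V8 code; the string "undefined" is just an illustrative placeholder):

    #include <cstddef>
    #include <string>
    #include <vector>

    // The callee always observes `expected` arguments: extra actual arguments
    // are left behind in the caller's frame, missing ones are filled in with
    // undefined.
    std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                            std::size_t expected) {
      std::vector<std::string> adapted;
      adapted.reserve(expected);
      for (std::size_t i = 0; i < expected; ++i) {
        adapted.push_back(i < actual.size() ? actual[i] : "undefined");
      }
      return adapted;
    }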
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
new file mode 100644
index 000000000..d7fac867f
--- /dev/null
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -0,0 +1,6889 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "codegen.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+ Register scratch1, Register scratch2,
+ Label* not_a_heap_number) {
+ __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+ __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in a0.
+ Label check_heap_number, call_builtin;
+ __ JumpIfNotSmi(a0, &check_heap_number);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&check_heap_number);
+ EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+ __ mov(v0, a0);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Create a new closure from the given function info in new
+ // space. Set the context to the current context in cp.
+ Label gc;
+
+ // Pop the function info from the stack.
+ __ pop(a3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize,
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ int map_index = strict_mode_ == kStrictMode
+ ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+ : Context::FUNCTION_MAP_INDEX;
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Initialize the rest of the function. We don't have to update the
+ // write barrier because the allocated object is in new space.
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+ __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+ __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+ // Initialize the code pointer in the function to be the one
+ // found in the shared function info object.
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+ // Return result. The argument function info has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Push(cp, a3, t0);
+ __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ v0,
+ a1,
+ a2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ lw(a3, MemOperand(sp, 0));
+
+ // Setup the object header.
+ __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ li(a2, Operand(Smi::FromInt(length)));
+ __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+ // Setup the fixed slots.
+ __ li(a1, Operand(Smi::FromInt(0)));
+ __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the previous context.
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, v0);
+ __ Pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
+ // All sizes here are multiples of kPointerSize.
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load the boilerplate object into a3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a3, t0);
+ __ lw(a3, MemOperand(t0));
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case, eq, a3, Operand(t1));
+
+ if (FLAG_debug_code) {
+ const char* message;
+ Heap::RootListIndex expected_map_index;
+ if (mode_ == CLONE_ELEMENTS) {
+ message = "Expected (writable) fixed array";
+ expected_map_index = Heap::kFixedArrayMapRootIndex;
+ } else {
+ ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ message = "Expected copy-on-write fixed array";
+ expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+ }
+ __ push(a3);
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, expected_map_index);
+ __ Assert(eq, message, a3, Operand(at));
+ __ pop(a3);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ // Return new object in v0.
+ __ AllocateInNewSpace(size,
+ v0,
+ a1,
+ a2,
+ &slow_case,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(v0, i));
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
+// scratch register. Destroys the source register. No GC occurs during this
+// stub so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ Register exponent = result1_;
+ Register mantissa = result2_;
+#else
+ Register exponent = result2_;
+ Register mantissa = result1_;
+#endif
+ Label not_special;
+ // Convert from Smi to integer.
+ __ sra(source_, source_, kSmiTagSize);
+ // Move sign bit from source to destination. This works because the sign bit
+ // in the exponent word of the double has the same position and polarity as
+ // the 2's complement sign bit in a Smi.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+ // Subtract from 0 if source was negative.
+ __ subu(at, zero_reg, source_);
+ __ movn(source_, at, exponent);
+
+ // We have -1, 0 or 1, which we treat specially. Register source_ contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, source_, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+ static const uint32_t exponent_word_for_1 =
+ HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+ // Safe to use 'at' as dest reg here.
+ __ Or(at, exponent, Operand(exponent_word_for_1));
+ __ movn(exponent, at, source_); // Write exp when source not 0.
+ // 1, 0 and -1 all have 0 for the second word.
+ __ mov(mantissa, zero_reg);
+ __ Ret();
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ __ clz(zeros_, source_);
+ // Compute exponent and or it into the exponent register.
+ // We use mantissa as a scratch register here.
+ __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+ __ subu(mantissa, mantissa, zeros_);
+ __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+ __ Or(exponent, exponent, mantissa);
+
+ // Shift up the source chopping the top bit off.
+ __ Addu(zeros_, zeros_, Operand(1));
+ // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+ __ sllv(source_, source_, zeros_);
+ // Compute lower part of fraction (last 12 bits).
+ __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+ // And the top (top 20 bits).
+ __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ or_(exponent, exponent, source_);
+
+ __ Ret();
+}
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(scratch1, a0, kSmiTagSize);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ sra(scratch1, a1, kSmiTagSize);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ if (destination == kCoreRegisters) {
+ __ Move(a2, a3, f14);
+ __ Move(a0, a1, f12);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from a0 to a3 and a2 in double format.
+ __ mov(scratch1, a0);
+ ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from a1 to a1 and a0 in double format.
+ __ mov(scratch1, a1);
+ ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (a0) into f14 or a2/a3.
+ LoadNumber(masm, destination,
+ a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (a1) into f12 or a0/a1.
+ LoadNumber(masm, destination,
+ a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+
+ Label is_smi, done;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(FPU) &&
+ destination == kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double from tagged HeapNumber to double register.
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+ // point in generating even more instructions.
+ __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ lw(dst2, FieldMemOperand(object,
+ HeapNumber::kValueOffset + kPointerSize));
+ }
+ __ Branch(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Convert smi to double using FPU instructions.
+ __ SmiUntag(scratch1, object);
+ __ mtc1(scratch1, dst);
+ __ cvt_d_w(dst, dst);
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ Move(dst1, dst2, dst);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write smi to dst1 and dst2 double format.
+ __ mov(scratch1, object);
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(ra);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_number) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ Label is_smi;
+ Label done;
+ Label not_in_int32_range;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+ __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+ __ ConvertToInt32(object,
+ dst,
+ scratch1,
+ scratch2,
+ double_scratch,
+ &not_in_int32_range);
+ __ jmp(&done);
+
+ __ bind(&not_in_int32_range);
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ __ EmitOutOfInt32RangeTruncate(dst,
+ scratch1,
+ scratch2,
+ scratch3);
+
+ __ jmp(&done);
+
+ __ bind(&is_smi);
+ __ SmiUntag(dst, object);
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch) {
+ ASSERT(!int_scratch.is(scratch2));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
+
+ Label done;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(int_scratch, single_scratch);
+ __ cvt_d_w(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ Move(dst1, dst2, double_dst);
+ }
+ } else {
+ Label fewer_than_20_useful_bits;
+ // Expected output:
+ // | dst2 | dst1 |
+ // | s | exp | mantissa |
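+ // Worked example under the layout above: int_scratch == 6 has its first set
+ // bit at position 2, so the biased exponent is 2 + 1023 = 1025 and the
+ // result is dst2 == 0x40180000, dst1 == 0, i.e. the IEEE-754 encoding
+ // of 6.0.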
+
+ // Check for zero.
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
+ __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+ // Preload the sign of the value.
+ __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+ // Get the absolute value of the object (as an unsigned integer).
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+ __ Subu(int_scratch, zero_reg, int_scratch);
+ __ bind(&skip_sub);
+
+ // Get mantissa[51:20].
+
+ // Get the position of the first set bit.
+ __ clz(dst1, int_scratch);
+ __ li(scratch2, 31);
+ __ Subu(dst1, scratch2, dst1);
+
+ // Set the exponent.
+ __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst2, scratch2,
+ HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+ // Clear the first non-zero bit (the implicit leading 1 of the mantissa).
+ __ li(scratch2, Operand(1));
+ __ sllv(scratch2, scratch2, dst1);
+ __ li(at, -1);
+ __ Xor(scratch2, scratch2, at);
+ __ And(int_scratch, int_scratch, scratch2);
+
+ // Get the number of bits to set in the lower part of the mantissa.
+ __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+ // Set the higher 20 bits of the mantissa.
+ __ srlv(at, int_scratch, scratch2);
+ __ or_(dst2, dst2, at);
+ __ li(at, 32);
+ __ subu(scratch2, at, scratch2);
+ __ sllv(dst1, int_scratch, scratch2);
+ __ Branch(&done);
+
+ __ bind(&fewer_than_20_useful_bits);
+ __ li(at, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(scratch2, at, dst1);
+ __ sllv(scratch2, int_scratch, scratch2);
+ __ Or(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, zero_reg);
+ }
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32) {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!heap_number_map.is(object) &&
+ !heap_number_map.is(scratch1) &&
+ !heap_number_map.is(scratch2));
+
+ Label done, obj_is_not_smi;
+
+ __ JumpIfNotSmi(object, &obj_is_not_smi);
+ __ SmiUntag(scratch1, object);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
+ __ Branch(&done);
+
+ __ bind(&obj_is_not_smi);
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Load the number.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, double_dst);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+ if (destination == kCoreRegisters) {
+ __ Move(dst1, dst2, double_dst);
+ }
+
+ } else {
+ ASSERT(!scratch1.is(object) && !scratch2.is(object));
+ // Load the double value in the destination registers.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst2));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+ // Check that the value can be exactly represented by a 32-bit integer.
+ // Jump to not_int32 if that's not the case.
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ ASSERT(!dst.is(object));
+ ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+ ASSERT(!scratch1.is(scratch2) &&
+ !scratch1.is(scratch3) &&
+ !scratch2.is(scratch3));
+
+ Label done;
+
+ // Untag the object into the destination register.
+ __ SmiUntag(dst, object);
+ // Just return if the object is a smi.
+ __ JumpIfSmi(object, &done);
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ // Object is a heap number.
+ // Convert the floating point value to a 32-bit integer.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Load the double value.
+ __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(double_scratch, double_scratch);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ // Get the result in the destination register.
+ __ mfc1(dst, double_scratch);
+
+ } else {
+ // Load the double value in the destination registers.
+ __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+ // Check for 0 and -0.
+ __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+ __ Or(dst, scratch2, Operand(dst));
+ __ Branch(&done, eq, dst, Operand(zero_reg));
+
+ DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+ // Registers state after DoubleIs32BitInteger.
+ // dst: mantissa[51:20].
+ // scratch2: 1
+
+ // Shift back the higher bits of the mantissa.
+ __ srlv(dst, dst, scratch3);
+ // Set the implicit first bit.
+ __ li(at, 32);
+ __ subu(scratch3, at, scratch3);
+ __ sllv(scratch2, scratch2, scratch3);
+ __ Or(dst, dst, scratch2);
+ // Set the sign.
+ __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Label skip_sub;
+ __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+ __ Subu(dst, zero_reg, dst);
+ __ bind(&skip_sub);
+ }
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ // Get exponent alone in scratch.
+ __ Ext(scratch,
+ src1,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Subtract the bias from the exponent.
+ __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+ // src1: higher (exponent) part of the double value.
+ // src2: lower (mantissa) part of the double value.
+ // scratch: unbiased exponent.
+
+ // Fast cases. Check for obvious non 32-bit integer values.
+ // Negative exponent cannot yield 32-bit integers.
+ __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+ // Exponent greater than 31 cannot yield 32-bit integers.
+ // Also, a positive value with an exponent equal to 31 is outside of the
+ // signed 32-bit integer range.
+ // Another way to put it is that if (exponent - signbit) > 30 then the
+ // number cannot be represented as an int32.
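+ // For example, 2^31 has exponent 31 and sign bit 0, so 31 - 0 = 31 > 30 and
+ // it is rejected, while -2^31 has exponent 31 and sign bit 1, so
+ // 31 - 1 = 30 and it is accepted, matching the int32 range
+ // [-2^31, 2^31 - 1].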
+ Register tmp = dst;
+ __ srl(at, src1, 31);
+ __ subu(tmp, scratch, at);
+ __ Branch(not_int32, gt, tmp, Operand(30));
+ // If any of bits [21:0] of the mantissa are set, the value has a fractional
+ // part (the exponent is at most 30) and cannot be an int32.
+ __ And(tmp, src2, 0x3fffff);
+ __ Branch(not_int32, ne, tmp, Operand(zero_reg));
+
+ // Otherwise the exponent needs to be big enough to shift all the non-zero
+ // bits out to the left. So we need the (30 - exponent) last bits of the
+ // 31 higher bits of the mantissa to be null.
+ // Because bits [21:0] are null, we can check instead that the
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+ // Get the 32 higher bits of the mantissa in dst.
+ __ Ext(dst,
+ src2,
+ HeapNumber::kMantissaBitsInTopWord,
+ 32 - HeapNumber::kMantissaBitsInTopWord);
+ __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+ __ or_(dst, dst, at);
+
+ // Create the mask and test the lower bits (of the higher bits).
+ __ li(at, 32);
+ __ subu(scratch, at, scratch);
+ __ li(src2, 1);
+ __ sllv(src1, src2, scratch);
+ __ Subu(src1, src1, Operand(1));
+ __ And(src1, dst, src1);
+ __ Branch(not_int32, ne, src1, Operand(zero_reg));
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Using core registers:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+
+ // Assert that heap_number_result is saved.
+ // We currently always use s0 to pass it.
+ ASSERT(heap_number_result.is(s0));
+
+ // Push the current return address before the C call.
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+ // We are not using MIPS FPU instructions, and the parameters for the
+ // runtime function call are prepared in a0-a3 registers, but the function
+ // we are calling is compiled with the hard-float flag and expects the
+ // hard-float ABI (parameters in f12/f14 registers). We need to copy the
+ // parameters from a0-a3 registers to the f12/f14 register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
+ }
+ // Call C routine that may not cause GC or other trouble.
+ __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+ 4);
+ // Store answer in the overwritable heap number.
+ if (!IsMipsSoftFloatABI) {
+ CpuFeatures::Scope scope(FPU);
+ // Double returned in register f0.
+ __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ // Double returned in registers v0 and v1.
+ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+ }
+ // Place heap_number_result in v0 and return to the pushed return address.
+ __ mov(v0, heap_number_result);
+ __ pop(ra);
+ __ Ret();
+}
+
+
+// See comment for class, this does NOT work for int32's that are in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ Label max_negative_int;
+ // the_int_ has the answer which is a signed int32 but not a Smi.
+ // We test for the special value that has a different exponent.
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ // Test sign, and save for later conditionals.
+ __ And(sign_, the_int_, Operand(0x80000000u));
+ __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+ // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+ uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(non_smi_exponent));
+ // Set the sign bit in scratch_ if the value was negative.
+ __ or_(scratch_, scratch_, sign_);
+ // Subtract from 0 if the value was negative.
+ __ subu(at, zero_reg, the_int_);
+ __ movn(the_int_, at, sign_);
+ // We should be masking the implicit first digit of the mantissa away here,
+ // but it just ends up combining harmlessly with the last digit of the
+ // exponent that happens to be 1. The sign bit is 0 so we shift by 10 to get
+ // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
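+ // For example, the_int_ == 2^30 yields an exponent word of
+ // 0x41D00000 | (0x40000000 >> 10) == 0x41D00000 and a zero mantissa word,
+ // i.e. exactly 1073741824.0; ORing the implicit bit into the low exponent
+ // bit (which is already 1) changes nothing.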
+ ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ srl(at, the_int_, shift_distance);
+ __ or_(scratch_, scratch_, at);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kExponentOffset));
+ __ sll(scratch_, the_int_, 32 - shift_distance);
+ __ sw(scratch_, FieldMemOperand(the_heap_number_,
+ HeapNumber::kMantissaOffset));
+ __ Ret();
+
+ __ bind(&max_negative_int);
+ // The max negative int32 is stored as a positive number in the mantissa of
+ // a double because it uses a sign bit instead of using two's complement.
+ // The actual mantissa bits stored are all 0 because the implicit most
+ // significant 1 bit is not stored.
+ non_smi_exponent += 1 << HeapNumber::kExponentShift;
+ __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+ __ mov(scratch_, zero_reg);
+ __ sw(scratch_,
+ FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+ __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc,
+ bool never_nan_nan) {
+ Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = t5;
+
+ __ Branch(&not_identical, ne, a0, Operand(a1));
+
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are equal and they are not both Smis, so neither of them is a Smi.
+ // If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ }
+ }
+ }
+ }
+
+ __ bind(&return_equal);
+ if (cc == less) {
+ __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == greater) {
+ __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
+ }
+ __ Ret();
+
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
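+ // For example, 0x7FF8000000000000 (a quiet NaN) has all exponent bits set
+ // and a non-zero mantissa, while 0x7FF0000000000000 (+Infinity) has a zero
+ // mantissa and therefore compares equal to itself here.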
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ Ret();
+ }
+ // No fall through here.
+ }
+
+ __ bind(&not_identical);
+}
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* slow,
+ bool strict) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ Label lhs_is_smi;
+ __ And(t0, lhs, Operand(kSmiTagMask));
+ __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+ // Rhs is a Smi.
+ // Check whether the non-smi is a heap number.
+ __ GetObjectType(lhs, t4, t4);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (lhs is already not zero).
+ __ mov(v0, lhs);
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Rhs is a smi, lhs is a number.
+ // Convert smi rhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ } else {
+ // Load lhs to a double in a2, a3.
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+ // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+ __ mov(t6, rhs);
+ ConvertToDoubleStub stub1(a1, a0, t6, t5);
+ __ push(ra);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ pop(ra);
+ }
+
+ // We now have both loaded as doubles.
+ __ jmp(both_loaded_as_doubles);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ GetObjectType(rhs, t4, t4);
+ if (strict) {
+ // If rhs was not a number and lhs was a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ __ li(v0, Operand(1));
+ __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // Convert smi lhs to double.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ // Convert lhs to a double format. t5 is scratch.
+ __ mov(t6, lhs);
+ ConvertToDoubleStub stub2(a3, a2, t6, t5);
+ __ push(ra);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(ra);
+ // Load rhs to a double in a1, a0.
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ Move(t0, t1, f14);
+ __ Move(t2, t3, f12);
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+ Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
+
+ Register exp_mask_reg = t4;
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ and_(t5, lhs_exponent, exp_mask_reg);
+ __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+ __ li(exp_mask_reg, HeapNumber::kExponentMask);
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(t5, rhs_exponent, exp_mask_reg);
+
+ __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+ __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+ __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret(); // Return.
+
+ __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ // f12 and f14 have the two doubles. Neither is a NaN.
+ // Call a native function to do a comparison between two non-NaNs.
+ // Call C routine that may not cause GC or other trouble.
+ // We do the call and return manually because we need the argument slots to
+ // be freed.
+
+ Label return_result_not_equal, return_result_equal;
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Lhs and rhs are already loaded to f12 and f14 register pairs.
+ __ Move(t0, t1, f14);
+ __ Move(t2, t3, f12);
+ } else {
+ // Lhs and rhs are already loaded to GP registers.
+ __ mov(t0, a0); // a0 has LS 32 bits of rhs.
+ __ mov(t1, a1); // a1 has MS 32 bits of rhs.
+ __ mov(t2, a2); // a2 has LS 32 bits of lhs.
+ __ mov(t3, a3); // a3 has MS 32 bits of lhs.
+ }
+ Register rhs_exponent = exp_first ? t0 : t1;
+ Register lhs_exponent = exp_first ? t2 : t3;
+ Register rhs_mantissa = exp_first ? t1 : t0;
+ Register lhs_mantissa = exp_first ? t3 : t2;
+
+ __ xor_(v0, rhs_mantissa, lhs_mantissa);
+ __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+ __ subu(v0, rhs_exponent, lhs_exponent);
+ __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+ // 0, -0 case.
+ __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+ __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+ __ or_(t4, rhs_exponent, lhs_exponent);
+ __ or_(t4, t4, rhs_mantissa);
+
+ __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+ __ bind(&return_result_equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+ }
+
+ __ bind(&return_result_not_equal);
+
+ if (!CpuFeatures::IsSupported(FPU)) {
+ __ push(ra);
+ __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
+ if (!IsMipsSoftFloatABI) {
+ // We are not using MIPS FPU instructions, and the parameters for the
+ // runtime function call are prepared in a0-a3 registers, but the function
+ // we are calling is compiled with the hard-float flag and expects the
+ // hard-float ABI (parameters in f12/f14 registers). We need to copy the
+ // parameters from a0-a3 registers to the f12/f14 register pairs.
+ __ Move(f12, a0, a1);
+ __ Move(f14, a2, a3);
+ }
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+ __ pop(ra); // Because this function returns int, result is in v0.
+ __ Ret();
+ } else {
+ CpuFeatures::Scope scope(FPU);
+ Label equal, less_than;
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&equal);
+ __ nop();
+
+ __ c(OLT, D, f12, f14);
+ __ bc1t(&less_than);
+ __ nop();
+
+ // Not equal, not less, not NaN, must be greater.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&equal);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&less_than);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+ }
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ // If either operand is a JS object or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into a2 and compare it with
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ GetObjectType(lhs, a2, a2);
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Return non-zero.
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ li(v0, Operand(1));
+ __ Ret();
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for oddballs: true, false, null, undefined.
+ __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(t2, a2, Operand(a3));
+ __ And(t0, t2, Operand(kIsSymbolMask));
+ __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ GetObjectType(lhs, a3, a2);
+ __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ // If first was a heap number & second wasn't, go to slow case.
+ __ Branch(slow, ne, a3, Operand(a2));
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+ if (rhs.is(a0)) {
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ } else {
+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+ }
+ }
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
+ ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+ (lhs.is(a1) && rhs.is(a0)));
+
+ // a2 is object type of lhs.
+ // Ensure that no non-strings have the symbol bit set.
+ Label object_test;
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(at, a2, Operand(kIsNotStringMask));
+ __ Branch(&object_test, ne, at, Operand(zero_reg));
+ __ And(at, a2, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+ __ GetObjectType(rhs, a3, a3);
+ __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ And(at, a3, Operand(kIsSymbolMask));
+ __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ Ret();
+
+ __ bind(&object_test);
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ GetObjectType(rhs, a2, a3);
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If both objects are undetectable, they are equal. Otherwise, they
+ // are not equal, since they are different objects and an object is not
+ // equal to undefined.
+ __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ and_(a0, a2, a3);
+ __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+ __ Ret();
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ __ sra(mask, mask, kSmiTagSize + 1);
+ __ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
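+ // For instance, a cache backed by a FixedArray of 2 * N elements gives
+ // mask == N - 1; a smi key k then probes element 2 * (k & (N - 1)) for the
+ // number and the following element for the cached string, as computed below.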
+ Isolate* isolate = masm->isolate();
+ Label is_smi;
+ Label load_result_from_cache;
+ if (!object_is_smi) {
+ __ JumpIfSmi(object, &is_smi);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ c(EQ, D, f12, f14);
+ __ bc1t(&load_result_from_cache);
+ __ nop(); // bc1t() requires explicit fill of branch delay slot.
+ __ Branch(not_found);
+ } else {
+ // Note that there is no cache check for the non-FPU case, even though
+ // it seems there could be. It may be a tiny optimization for non-FPU
+ // cores.
+ __ Branch(not_found);
+ }
+ }
+
+ __ bind(&is_smi);
+ Register scratch = scratch1;
+ __ sra(scratch, object, 1); // Shift away the tag.
+ __ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ __ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ Register probe = mask;
+ __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ __ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ __ bind(&load_result_from_cache);
+ __ lw(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ __ IncrementCounter(isolate->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ __ lw(a1, MemOperand(sp, 0));
+
+ // Generate code to lookup number in the number string cache.
+ GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+ __ Addu(sp, sp, Operand(1 * kPointerSize));
+ __ Ret();
+
+ __ bind(&runtime);
+ // Handle number to string in the runtime system if not found in the cache.
+ __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
+}
+
+
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Subu(v0, a1, a0);
+ __ Ret();
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ Or(a2, a1, a0);
+ __ And(a2, a2, kSmiTagMask);
+ __ Assert(ne, "CompareStub: unexpected smi operands.",
+ a2, Operand(zero_reg));
+ }
+
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ And(t2, lhs_, Operand(rhs_));
+ __ JumpIfNotSmi(t2, &not_smis, t0);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Jump or fall through to both_loaded_as_doubles.
+ // In case 3 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into f12 and f14 as doubles,
+ // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+ &both_loaded_as_doubles, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // f12, f14 are the double representations of the left hand side
+ // and the right hand side if we have FPU. Otherwise a2, a3 represent
+ // left hand side and a0, a1 represent right hand side.
+
+ Isolate* isolate = masm->isolate();
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ c(UN, D, f12, f14);
+ __ bc1t(&nan);
+ __ nop();
+
+ // Check if LESS condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(OLT, D, f12, f14);
+ __ movt(v0, t0);
+ // Use the previous check to conditionally move the opposite result
+ // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
+ // next check.
+ __ movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ movt(v0, t2);
+
+ __ Ret();
+
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc_ == lt || cc_ == le) {
+ __ li(v0, Operand(GREATER));
+ } else {
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
+ } else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN.
+ EmitNanCheck(masm, cc_);
+
+ // Compares two doubles that are not NaNs. Returns the answer.
+ // Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
+
+ __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ }
+
+ Label check_for_symbols;
+ Label flat_string_check;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case a2 will contain the type of lhs_.
+ EmitCheckForTwoHeapNumbers(masm,
+ lhs_,
+ rhs_,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &flat_string_check);
+
+ __ bind(&check_for_symbols);
+ if (cc_ == eq && !strict_) {
+ // Returns an answer for two symbols or two detectable objects.
+ // Otherwise jumps to string case or not both strings case.
+ // Assumes that a2 is the type of lhs_ on entry.
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ }
+
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+ __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+ if (cc_ == eq) {
+ StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0);
+ } else {
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ lhs_,
+ rhs_,
+ a2,
+ a3,
+ t0,
+ t1);
+ }
+ // Never falls through to here.
+
+ __ bind(&slow);
+ // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+ // a1 (rhs) second.
+ __ Push(lhs_, rhs_);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result.
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ncr = LESS;
+ }
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(native, JUMP_FUNCTION);
+}
+
+
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub uses FPU instructions.
+ CpuFeatures::Scope scope(FPU);
+
+ Label false_result;
+ Label not_heap_number;
+ Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+ // undefined -> false
+ __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // Boolean -> its value
+ __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+ __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the equal condition is satisfied.
+ __ Ret(eq, tos_, Operand(scratch0));
+
+ // Smis: 0 -> false, all other -> true
+ __ And(scratch0, tos_, tos_);
+ __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+ __ And(scratch0, tos_, Operand(kSmiTagMask));
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the not equal condition is satisfied.
+ __ Ret(eq, scratch0, Operand(zero_reg));
+
+ // 'null' -> false
+ __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+ __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+ // HeapNumber => false if +0, -0, or NaN.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+ __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ fcmp(f12, 0.0, UEQ);
+
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ movt(tos_, zero_reg);
+ __ Ret();
+
+ __ bind(&not_heap_number);
+
+ // It can be an undetectable object.
+ // Undetectable => false.
+ __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+ __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+ // JavaScript object => true.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Check for string.
+ __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+ // "tos_" is a register and contains a non-zero value.
+ // Hence we implicitly return true if the greater than
+ // condition is satisfied.
+ __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+ // String value => false iff empty, i.e., length is zero.
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // If length is zero, "tos_" contains zero ==> false.
+ // If length is not zero, "tos_" contains a non-zero value ==> true.
+ __ Ret();
+
+ // Return 0 in "tos_" for false.
+ __ bind(&false_result);
+ __ mov(tos_, zero_reg);
+ __ Ret();
+}
+
+
+const char* UnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name = NULL; // Make g++ happy.
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
+ return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case UnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case UnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case UnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case UnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Argument is in a0 and v0 at this point, so we can overwrite a0.
+ __ li(a2, Operand(Smi::FromInt(op_)));
+ __ li(a1, Operand(Smi::FromInt(mode_)));
+ __ li(a0, Operand(Smi::FromInt(operand_type_)));
+ __ Push(v0, a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow) {
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // The result of negating zero or the smallest negative smi is not a smi.
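+ // (Both 0 and the smi encoding of -2^30, 0x80000000, have every bit other
+ // than bit 31 clear, so the single mask test below catches exactly these
+ // two values.)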
+ __ And(t0, a0, ~0x80000000);
+ __ Branch(slow, eq, t0, Operand(zero_reg));
+
+ // Return '0 - value'.
+ __ Subu(v0, zero_reg, a0);
+ __ Ret();
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi) {
+ __ JumpIfNotSmi(a0, non_smi);
+
+ // Flip bits and revert inverted smi-tag.
+ __ Neg(v0, a0);
+ __ And(v0, v0, ~kSmiTagMask);
+ __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow, call_builtin;
+ GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+ __ bind(&call_builtin);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // a0 is a heap number. Get a new heap number in a1.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+ __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+ __ mov(v0, a1);
+ }
+ __ Ret();
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm,
+ Label* slow) {
+ Label impossible;
+
+ EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+ // Convert the heap number in a0 to an untagged integer in a1.
+ __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ Neg(a1, a1);
+ __ Addu(a2, a1, Operand(0x40000000));
+ __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+ // Tag the result as a smi and we're done.
+ __ SmiTag(v0, a1);
+ __ Ret();
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ // Allocate a new heap number without zapping v0, which we need if it fails.
+ __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(v0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
+ __ LeaveInternalFrame();
+
+ // Convert the heap number in v0 to an untagged integer in a1.
+ // This can't go slow-case because it's the same number we already
+ // converted once before.
+ __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
+ // Negate the result.
+ __ Xor(a1, a1, -1);
+
+ __ bind(&heapnumber_allocated);
+ __ mov(v0, a2); // Move newly allocated heap number to v0.
+ }
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ push(a0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(a1, a0);
+
+ __ li(a2, Operand(Smi::FromInt(MinorKey())));
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(a2, a1, a0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 5,
+ 1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* BinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+ Register left = a1;
+ Register right = a0;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(kSmiTag == 0);
+
+ Label not_smi_result;
+ switch (op_) {
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ RetOnNoOverflow(scratch1);
+ // No need to revert anything - right and left are intact.
+ break;
+ case Token::MUL: {
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(scratch1, right);
+ // Do multiplication.
+ // lo = lower 32 bits of scratch1 * left.
+ // hi = higher 32 bits of scratch1 * left.
+ __ Mult(left, scratch1);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
+ __ sra(scratch1, scratch1, 31);
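+      // sra fills scratch1 with copies of the sign bit of the low word; if
+      // the high word equals that sign extension, the product fits in 32 bits.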
+ __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+ // Go slow on zero result to handle -0.
+ __ mflo(v0);
+ __ Ret(ne, v0, Operand(zero_reg));
+      // We need to return -0 if a negative number was multiplied by 0 to get
+      // the 0. We know one of the operands was zero.
+ __ Addu(scratch2, right, left);
+ Label skip;
+ // ARM uses the 'pl' condition, which is 'ge'.
+ // Negating it results in 'lt'.
+ __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ __ Ret(); // Return smi 0 if the non-zero one was positive.
+ __ bind(&skip);
+      // We fall through here if we multiplied a negative number by 0, because
+      // that means we should produce -0.
+ }
+ break;
+ case Token::DIV: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by zero before getting the result.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+ // Quotient is in 'lo', remainder is in 'hi'.
+ // Check for no remainder first.
+ __ mfhi(scratch1);
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ __ mflo(scratch1);
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ }
+ break;
+ case Token::MOD: {
+ Label done;
+ __ SmiUntag(scratch2, right);
+ __ SmiUntag(scratch1, left);
+ __ Div(scratch1, scratch2);
+ // A minor optimization: div may be calculated asynchronously, so we check
+ // for division by 0 before calling mfhi.
+ // Check for zero on the right hand side.
+ __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+ // If the result is 0, we need to make sure the dividend (left) is
+ // positive (or 0), otherwise it is a -0 case.
+ // Remainder is in 'hi'.
+ __ mfhi(scratch2);
+ __ Branch(&done, ne, scratch2, Operand(zero_reg));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ bind(&done);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch1, scratch2, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+ __ SmiTag(v0, scratch2);
+ __ Ret();
+ }
+ break;
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(scratch1, left, scratch1);
+ // Smi tag result.
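+      // left was still tagged during the shift, so clearing the low (tag) bit
+      // of the shifted value yields the correctly tagged smi result.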
+ __ And(v0, scratch1, Operand(~kSmiTagMask));
+ __ Ret();
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(v0, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
+ __ And(scratch1, v0, Operand(0xc0000000));
+ __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+ // Smi tag result.
+ __ SmiTag(v0);
+ __ Ret();
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ // Check that the signed result fits in a Smi.
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
+
+
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ Register scratch3 = t0;
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands && FLAG_debug_code) {
+ __ AbortIfNotSmi(left);
+ __ AbortIfNotSmi(right);
+ }
+
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+ // depending on whether FPU is available or not.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(FPU) &&
+ op_ != Token::MOD ?
+ FloatingPointHelper::kFPURegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ // Allocate new heap number for result.
+ Register result = s0;
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+ // Load the operands.
+ if (smi_operands) {
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+ } else {
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
+ }
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ // Using FPU registers:
+ // f12: Left value.
+ // f14: Right value.
+ CpuFeatures::Scope scope(FPU);
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ mov(v0, result);
+ __ Ret();
+ } else {
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+ op_,
+ result,
+ scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(a3, left);
+ __ SmiUntag(a2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in a2 and left in a3.
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ FloatingPointHelper::ConvertNumberToInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ not_numbers);
+ }
+ Label result_not_a_smi;
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ srlv(a2, a3, a2);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of
+ // writing the register as an unsigned int so we go to slow case if we
+ // hit this case.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+ } else {
+ __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(a2, a2, 5);
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // Check that the *signed* result fits in a smi.
+ __ Addu(a3, a2, Operand(0x40000000));
+ __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ Register result = t1;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
+
+ // a2: Answer as signed int32.
+ // t1: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to v0, which is the
+ // result.
+ __ mov(v0, t1);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ // Convert the int32 in a2 to the heap number in a0. As
+ // mentioned above SHR needs to always produce a positive result.
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(a2, f0);
+ if (op_ == Token::SHR) {
+ __ Cvt_d_uw(f0, f0);
+ } else {
+ __ cvt_d_w(f0, f0);
+ }
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a0 as scratch. v0 is preserved and returned.
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+ __ TailCallStub(&stub);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful a return is
+// generated. If the result is not a smi and heap number allocation is not
+// requested, the code falls through. If number allocation is requested but a
+// heap number cannot be allocated, the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Label not_smis;
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+
+ // Perform combined smi check on both operands.
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
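+  // Since kSmiTag == 0, the OR above has its tag bit clear only when both
+  // operands are smis.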
+ __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+ GenerateSmiSmiOperation(masm);
+
+ // If heap number results are possible generate the result in an allocated
+ // heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
+ }
+ __ bind(&not_smis);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smis, call_runtime;
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
+
+ Register left = a1;
+ Register right = a0;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ FPURegister double_scratch = f0;
+ FPURegister single_scratch = f6;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = t2;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+ // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently bound to the same position. We use two
+  // different labels to differentiate the cause leading to a type transition.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ Or(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ GenerateSmiSmiOperation(masm);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+      // Load both operands and check that they are 32-bit integers.
+ // Jump to type transition if they are not. The registers a0 and a1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+ ? FloatingPointHelper::kFPURegisters
+ : FloatingPointHelper::kCoreRegisters;
+
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ f14,
+ a2,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ f12,
+ t0,
+ t1,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ add_d(f10, f12, f14);
+ break;
+ case Token::SUB:
+ __ sub_d(f10, f12, f14);
+ break;
+ case Token::MUL:
+ __ mul_d(f10, f12, f14);
+ break;
+ case Token::DIV:
+ __ div_d(f10, f12, f14);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ != Token::DIV) {
+ // These operations produce an integer result.
+ // Try to return a smi if we can.
+ // Otherwise return a heap number if allowed, or jump to type
+ // transition.
+
+ // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+ // On MIPS a lot of things cannot be implemented the same way so right
+ // now it makes a lot more sense to just do things manually.
+
+ // Save FCSR.
+ __ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
+ __ trunc_w_d(single_scratch, f10);
+ // Retrieve FCSR.
+ __ cfc1(scratch2, FCSR);
+ // Restore FCSR.
+ __ ctc1(scratch1, FCSR);
+
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
+
+ if (result_type_ <= BinaryOpIC::INT32) {
+ // If scratch2 != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ }
+
+ // Check if the result fits in a smi.
+ __ mfc1(scratch1, single_scratch);
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ // If not try to return a heap number.
+ __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+ // Check for minus zero. Return heap number for minus zero.
+ Label not_zero;
+ __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch2, f11);
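+          // With 32-bit FPU registers the double in f10 occupies the pair
+          // f10/f11; f11 holds the high word, so its sign bit distinguishes
+          // -0 from +0.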
+ __ And(scratch2, scratch2, HeapNumber::kSignMask);
+ __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
+ __ bind(&not_zero);
+
+ // Tag the result and return.
+ __ SmiTag(v0, scratch1);
+ __ Ret();
+ } else {
+ // DIV just falls through to allocating a heap number.
+ }
+
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
+ // We are using FPU registers so s0 is available.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+ __ mov(v0, heap_number_result);
+ __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ }
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ // We preserved a0 and a1 to be able to call runtime.
+ // Save the left value on the stack.
+ __ Push(t1, t0);
+
+ Label pop_and_call_runtime;
+
+ // Allocate a heap number to store the result.
+ heap_number_result = s0;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
+
+ // Load the left value from the value saved on the stack.
+ __ Pop(a1, a0);
+
+ // Call the C function to handle the double operation.
+ FloatingPointHelper::CallCCodeForDoubleOperation(
+ masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+
+ __ bind(&pop_and_call_runtime);
+ __ Drop(2);
+ __ Branch(&call_runtime);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ Register scratch3 = t1;
+ // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+ // registers a0 and a1 (right and left) are preserved for the runtime
+ // call.
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ left,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32(masm,
+ right,
+ a2,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ scratch3,
+ f0,
+ &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ Or(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(a2, a3, Operand(a2));
+ break;
+ case Token::BIT_AND:
+ __ And(a2, a3, Operand(a2));
+ break;
+ case Token::SAR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srav(a2, a3, a2);
+ break;
+ case Token::SHR:
+ __ And(a2, a2, Operand(0x1f));
+ __ srlv(a2, a3, a2);
+        // SHR is special because it is required to produce a positive answer.
+        // We only get a negative-looking result if the shift count was 0.
+        // That result cannot be represented as a signed 32-bit integer, so
+        // try to return a heap number if we can.
+        // The non-FPU code does not support this special case, so jump to
+        // the runtime if FPU is not available.
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
+ } else {
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &call_runtime,
+ lt,
+ a2,
+ Operand(zero_reg));
+ }
+ break;
+ case Token::SHL:
+ __ And(a2, a2, Operand(0x1f));
+ __ sllv(a2, a3, a2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi.
+ __ Addu(scratch1, a2, Operand(0x40000000));
+ // If not try to return a heap number. (We know the result is an int32.)
+ __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+ // Tag the result and return.
+ __ SmiTag(v0, a2);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ heap_number_result = t1;
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch);
+ }
+
+ // Store the result.
+ __ mov(v0, heap_number_result);
+ __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Tail call that writes the int32 in a2 to the heap number in v0, using
+ // a3 and a1 as scratch. v0 is preserved and returned.
+ __ mov(a0, t1);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+ __ TailCallStub(&stub);
+ }
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&check, ne, a1, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, ne, a0, Operand(t0));
+ if (Token::IsBitOp(op_)) {
+ __ li(a0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(a0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime, call_string_add_or_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ Register left = a1;
+ Register right = a0;
+
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ GetObjectType(left, a2, a2);
+ __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ GetObjectType(right, a2, a2);
+ __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
+  // The code below may clobber result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of them.
+ ASSERT(!result.is(a0) && !result.is(a1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ Branch(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, overwritable_operand);
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(a1, a0);
+}
+
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ // Untagged case: double input in f4, double result goes
+ // into f4.
+ // Tagged case: tagged input on top of stack and in a0,
+ // tagged result (heap number) goes into v0.
+
+ Label input_not_smi;
+ Label loaded;
+ Label calculate;
+ Label invalid_cache;
+ const Register scratch0 = t5;
+ const Register scratch1 = t3;
+ const Register cache_entry = a0;
+ const bool tagged = (argument_type_ == TAGGED);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ if (tagged) {
+ // Argument is a number and is on stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ Move(a2, a3, f4);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Store the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ Move(a2, a3, f4);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
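+    // kCacheSize is a power of two (asserted above), so masking with
+    // kCacheSize - 1 reduces the hash modulo the cache size.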
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
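+    // a1 * 12 is computed below as (a1 + a1 * 2) * 4 using shifts and adds.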
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Addu(cache_entry, cache_entry, 12);
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, cleanup and return.
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
+ } // if (CpuFeatures::IsSupported(FPU))
+
+ __ bind(&calculate);
+ if (tagged) {
+ __ bind(&invalid_cache);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+ masm->isolate()),
+ 1,
+ 1);
+ } else {
+ if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+ CpuFeatures::Scope scope(FPU);
+
+ Label no_update;
+ Label skip_cache;
+ const Register heap_number_map = t2;
+
+ // Call C function to calculate the result and update the cache.
+ // Register a0 holds precalculated cache entry address; preserve
+ // it on the stack and pop it into register cache_entry after the
+ // call.
+ __ push(cache_entry);
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+
+ // Try to update the cache. If we cannot allocate a
+ // heap number, we return the result without updating.
+ __ pop(cache_entry);
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+ __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+ __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+ __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+ __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+ __ mov(v0, cache_entry);
+ __ Ret();
+
+ __ bind(&invalid_cache);
+ // The cache is invalid. Call runtime which will recreate the
+ // cache.
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+ __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ EnterInternalFrame();
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+
+ __ bind(&skip_cache);
+ // Call C function to calculate the result and answer directly
+ // without updating the cache.
+ GenerateCallCFunction(masm, scratch0);
+ __ GetCFunctionDoubleResult(f4);
+ __ bind(&no_update);
+
+ // We return the value in f4 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+ Register scratch) {
+ __ push(ra);
+ __ PrepareCallCFunction(2, scratch);
+ if (IsMipsSoftFloatABI) {
+ __ Move(v0, v1, f4);
+ } else {
+ __ mov_d(f12, f4);
+ }
+ switch (type_) {
+ case TranscendentalCache::SIN:
+ __ CallCFunction(
+ ExternalReference::math_sin_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::COS:
+ __ CallCFunction(
+ ExternalReference::math_cos_double_function(masm->isolate()), 2);
+ break;
+ case TranscendentalCache::LOG:
+ __ CallCFunction(
+ ExternalReference::math_log_double_function(masm->isolate()), 2);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ __ pop(ra);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ case TranscendentalCache::LOG: return Runtime::kMath_log;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ Label base_not_smi;
+ Label exponent_not_smi;
+ Label convert_exponent;
+
+ const Register base = a0;
+ const Register exponent = a2;
+ const Register heapnumbermap = t1;
+ const Register heapnumber = s0; // Callee-saved register.
+ const Register scratch = t2;
+ const Register scratch2 = t3;
+
+    // Allocate FP values in the ABI-parameter-passing regs.
+ const DoubleRegister double_base = f12;
+ const DoubleRegister double_exponent = f14;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f2;
+
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ __ lw(base, MemOperand(sp, 1 * kPointerSize));
+ __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+ // Convert base to double value and store it in f0.
+ __ JumpIfNotSmi(base, &base_not_smi);
+ // Base is a Smi. Untag and convert it.
+ __ SmiUntag(base);
+ __ mtc1(base, double_scratch);
+ __ cvt_d_w(double_base, double_scratch);
+ __ Branch(&convert_exponent);
+
+ __ bind(&base_not_smi);
+ __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Base is a heapnumber. Load it into double register.
+ __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+ __ bind(&convert_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+
+ // The base is in a double register and the exponent is
+ // an untagged smi. Allocate a heap number and call a
+ // C function for integer exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(3, scratch);
+ __ SetCallCDoubleArguments(double_base, exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 3);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+
+ __ bind(&exponent_not_smi);
+ __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+ __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+ // Exponent is a heapnumber. Load it into double register.
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+ // The base and the exponent are in double registers.
+ // Allocate a heap number and call a C function for
+ // double exponents. The register containing
+ // the heap number is callee-saved.
+ __ AllocateHeapNumber(heapnumber,
+ scratch,
+ scratch2,
+ heapnumbermap,
+ &call_runtime);
+ __ push(ra);
+ __ PrepareCallCFunction(4, scratch);
+ // ABI (o32) for func(double a, double b): a in f12, b in f14.
+ ASSERT(double_base.is(f12));
+ ASSERT(double_exponent.is(f14));
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ __ mov(v0, heapnumber);
+ __ DropAndRet(2 * kPointerSize);
+ }
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ __ Throw(v0);
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ __ ThrowUncatchable(type, v0);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate) {
+ // v0: result parameter for PerformGC, if any
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to the first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ if (do_gc) {
+ // Move result passed in v0 into a0 to call PerformGC.
+ __ mov(a0, v0);
+ __ PrepareCallCFunction(1, a1);
+ __ CallCFunction(
+ ExternalReference::perform_gc_function(masm->isolate()), 1);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+ if (always_allocate) {
+ __ li(a0, Operand(scope_depth));
+ __ lw(a1, MemOperand(a0));
+ __ Addu(a1, a1, Operand(1));
+ __ sw(a1, MemOperand(a0));
+ }
+
+ // Prepare arguments for C routine: a0 = argc, a1 = argv
+ __ mov(a0, s0);
+ __ mov(a1, s1);
+
+ // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+ // also need to reserve the 4 argument slots on the stack.
+
+ __ AssertStackIsAligned();
+
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ // This branch-and-link sequence is needed to find the current PC on mips,
+ // saved to the ra register.
+ // Use masm-> here instead of the double-underscore macro since extra
+ // coverage code can interfere with the proper calculation of ra.
+ Label find_ra;
+ masm->bal(&find_ra); // bal exposes branch delay slot.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ // Adjust the value in ra to point to the correct return location, 2nd
+ // instruction past the real call into C code (the jalr(t9)), and push it.
+ // This is the return address of the exit frame.
+ const int kNumInstructionsToJump = 6;
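+    // On MIPS32 each instruction is 4 bytes, the same as kPointerSize, so
+    // this advances ra by exactly kNumInstructionsToJump instructions.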
+ masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+ // Stack is still aligned.
+
+ // Call the C routine.
+ masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ masm->jalr(t9);
+ masm->nop(); // Branch delay slot nop.
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump,
+ masm->InstructionsGeneratedSince(&find_ra));
+ }
+
+ // Restore stack (remove arg slots).
+ __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+
+ if (always_allocate) {
+ // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+ __ li(a2, Operand(scope_depth));
+ __ lw(a3, MemOperand(a2));
+ __ Subu(a3, a3, Operand(1));
+ __ sw(a3, MemOperand(a2));
+ }
+
+ // Check for failure result.
+ Label failure_returned;
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
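+  // kFailureTagMask is a low-bit mask, and the STATIC_ASSERT above implies
+  // that a failure object has all of those bits set, so (v0 + 1) has them
+  // clear exactly when v0 is a failure.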
+ __ addiu(a2, v0, 1);
+ __ andi(t0, a2, kFailureTagMask);
+ __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+
+ // Exit C frame and return.
+ // v0:v1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ __ LeaveExitFrame(save_doubles_, s0);
+ __ Ret();
+
+ // Check if we should retry or throw exception.
+ Label retry;
+ __ bind(&failure_returned);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+ __ Branch(&retry, eq, t0, Operand(zero_reg));
+
+ // Special handling of out of memory exceptions.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ Branch(throw_out_of_memory_exception, eq,
+ v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+ // Retrieve the pending exception and clear the variable.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a3, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(t0));
+ __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions, which are uncatchable
+  // by JavaScript code.
+ __ Branch(throw_termination_exception, eq,
+ v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry);
+  // On retry, the last failure (v0) will be moved to a0 as the parameter.
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ // Compute the argv pointer in a callee-saved register.
+ __ sll(s1, a0, kPointerSizeLog2);
+ __ Addu(s1, sp, s1);
+ __ Subu(s1, s1, Operand(kPointerSize));
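+  // s1 = sp + argc * kPointerSize - kPointerSize, the address of the first
+  // argument (see the register summary below).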
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(save_doubles_);
+
+ // Setup argc and the builtin function in callee-saved registers.
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+ // s0: number of arguments (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ Label throw_normal_exception;
+ Label throw_termination_exception;
+ Label throw_out_of_memory_exception;
+
+ // Call into the runtime system.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ false,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_termination_exception,
+ &throw_out_of_memory_exception,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+ __ bind(&throw_termination_exception);
+ GenerateThrowUncatchable(masm, TERMINATION);
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+
+ // Registers:
+ // a0: entry address
+ // a1: function
+  // a2: receiver
+ // a3: argc
+ //
+ // Stack:
+ // 4 args slots
+ // args
+
+ // Save callee saved registers on the stack.
+ __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+ // Load argv in s0 register.
+ __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+ StandardFrameConstants::kCArgsSlotsSize));
+
+ // We build an EntryFrame.
+ __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ li(t2, Operand(Smi::FromInt(marker)));
+ __ li(t1, Operand(Smi::FromInt(marker)));
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ lw(t0, MemOperand(t0));
+ __ Push(t3, t2, t1, t0);
+ // Setup frame pointer for the frame to be pushed.
+ __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+  // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xff...f) |
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ #ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+ masm->isolate());
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ lw(t2, MemOperand(t1));
+ __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+ __ sw(fp, MemOperand(t1));
+ __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(t0);
+ #endif
+
+ // Call a faked try-block that does the invoke.
+ __ bal(&invoke); // bal exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
+ __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler
+ // returns control to the code after the bal(&invoke) above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ li(t0,
+ Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(t1, MemOperand(t0));
+ __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Registers:
+ // a0: entry_address
+ // a1: function
+  // a2: receiver_pointer
+ // a3: argc
+ // s0: argv
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // 4 args slots
+ // args
+
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+ masm->isolate());
+ __ li(t0, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+ __ li(t0, Operand(entry));
+ }
+ __ lw(t9, MemOperand(t0)); // Deref address.
+
+ // Call JSEntryTrampoline.
+ __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ __ Call(t9);
+
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
+
+ __ bind(&exit); // v0 holds result
+ #ifdef ENABLE_LOGGING_AND_PROFILING
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(t1);
+ __ Branch(&non_outermost_js_2, ne, t1,
+ Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ sw(zero_reg, MemOperand(t1));
+ __ bind(&non_outermost_js_2);
+ #endif
+
+ // Restore the top frame descriptors from the stack.
+ __ pop(t1);
+ __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+ masm->isolate())));
+ __ sw(t1, MemOperand(t0));
+
+ // Reset the stack to the callee saved registers.
+ __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+
+// Uses registers a0 to t0.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // This is a crankshaft-specific feature that has not been implemented yet.
+ ASSERT(!HasCallSiteInlineCheck());
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+ // Fixed register usage throughout the stub:
+ const Register object = a0; // Object (lhs).
+ Register map = a3; // Map of the object.
+ const Register function = a1; // Function (rhs).
+ const Register prototype = t0; // Prototype of the function.
+ const Register inline_site = t5;
+ const Register scratch = a2;
+
+ Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+ if (!HasArgsInRegisters()) {
+ __ lw(object, MemOperand(sp, 1 * kPointerSize));
+ __ lw(function, MemOperand(sp, 0));
+ }
+
+ // Check that the left hand is a JS object and load map.
+ __ JumpIfSmi(object, &not_js_object);
+ __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+ // If there is a call site cache don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(t1));
+ __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(t1));
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
+
+ // Get the prototype of the function.
+ __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ JumpIfSmi(prototype, &slow);
+ __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ // Register mapping: a3 is object map and t0 is function prototype.
+ // Get prototype of object into a2.
+ __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+ // We don't need map any more. Use it as a scratch register.
+ Register scratch2 = map;
+ map = no_reg;
+
+ // Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ __ bind(&loop);
+ __ Branch(&is_instance, eq, scratch, Operand(prototype));
+ __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+ __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ __ Branch(&loop);
+
+ __ bind(&is_instance);
+ ASSERT(Smi::FromInt(0) == 0);
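+  // By convention this stub returns smi 0 when the object is an instance
+  // and smi 1 when it is not.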
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(v0, zero_reg);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&is_not_instance);
+ if (!HasCallSiteInlineCheck()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ UNIMPLEMENTED_MIPS();
+ }
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ Label object_not_null, object_not_null_or_smi;
+ __ bind(&not_js_object);
+  // Before the null, smi and string value checks, check that the rhs is a
+  // function; for a non-function rhs an exception needs to be thrown.
+ __ JumpIfSmi(function, &slow);
+ __ GetObjectType(function, scratch2, scratch);
+ __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Null is not instance of anything.
+ __ Branch(&object_not_null, ne, scratch,
+ Operand(masm->isolate()->factory()->null_value()));
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null);
+ // Smi values are not instances of anything.
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&object_not_null_or_smi);
+ // String values are not instances of anything.
+ __ IsObjectJSStringType(object, scratch, &slow);
+ __ li(v0, Operand(Smi::FromInt(1)));
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+ // Slow-case. Tail call builtin.
+ __ bind(&slow);
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(a0, a1);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+ } else {
+ __ EnterInternalFrame();
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(a0, v0);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ }
+}
+
+
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
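+  // Illustration (the names k and n are only for exposition): with an untagged
+  // key k and formal parameter count n, the element is read below from
+  // frame_base + (n - k) * kPointerSize + kDisplacement, where frame_base is
+  // fp or, in the adaptor case, the adaptor frame pointer.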
+
+  // Check that the key is a smi.
+ Label slow;
+ __ JumpIfNotSmi(a1, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check index (a1) against formal parameters count limit passed in
+ // through register a0. Use unsigned comparison to get negative
+ // check for free.
+ __ Branch(&slow, hs, a1, Operand(a0));
+
+ // Read the argument from the stack and return it.
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, fp, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Arguments adaptor case: Check index (a1) against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+ // Read the argument from the adaptor frame and return it.
+ __ subu(a3, a0, a1);
+ __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(t3));
+ __ lw(v0, MemOperand(a3, kDisplacement));
+ __ Ret();
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(a1);
+ __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne,
+ a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+ __ sll(t3, a2, 1);
+ __ Addu(a3, a3, Operand(t3));
+ __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // t2 : allocated object (tagged)
+ // t5 : mapped parameter count (tagged)
+
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ // a1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a2, a1);
+ __ b(&try_allocate);
+ __ nop(); // Branch delay slot nop.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t6, a2, 1);
+ __ Addu(a3, a3, Operand(t6));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // a1 = parameter count (tagged)
+ // a2 = argument count (tagged)
+ // Compute the mapped parameter count = min(a1, a2) in a1.
+ Label skip_min;
+ __ Branch(&skip_min, lt, a1, Operand(a2));
+ __ mov(a1, a2);
+ __ bind(&skip_min);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
+ __ sll(t5, a1, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
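+  // Illustrative note: t5 now holds the parameter map size in bytes, i.e.
+  // parameter_count * kPointerSize + kParameterMapHeaderSize, or zero when
+  // there are no mapped parameters (a smi shifted left by one is the count
+  // scaled by kPointerSize).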
+
+ // 2. Backing store.
+ __ sll(t6, a2, 1);
+ __ Addu(t5, t5, Operand(t6));
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ lw(a3, FieldMemOperand(t0, i));
+ __ sw(a3, FieldMemOperand(v0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+ // Move backing store address to a3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a3, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t2, a1, Operand(Smi::FromInt(2)));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ sll(t6, a1, 1);
+ __ Addu(t2, t0, Operand(t6));
+ __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
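+  // Worked example (values are only for illustration): with a parameter_count
+  // of 3 and a mapped_parameter_count of 2, the loop below writes the context
+  // indices MIN_CONTEXT_SLOTS + 1 and MIN_CONTEXT_SLOTS + 2 into the parameter
+  // map and the-hole into the corresponding backing store slots.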
+ Label parameters_loop, parameters_test;
+ __ mov(t2, a1);
+ __ lw(t5, MemOperand(sp, 0 * kPointerSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(a1));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ sll(t6, t2, 1);
+ __ Addu(a3, t0, Operand(t6));
+ __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+
+ // t2 = loop variable (tagged)
+ // a1 = mapping index (tagged)
+ // a3 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+  // t1 = temporary scratch (among other things, for address calculation)
+ // t3 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Subu(t2, t2, Operand(Smi::FromInt(1)));
+ __ sll(t1, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, t1);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a3, t1);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
+
+ __ bind(&skip_parameter_map);
+ // a2 = argument count (tagged)
+ // a3 = address of backing store (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
+ __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(t5, a1);
+ __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+ __ sll(t6, t5, 1);
+ __ Subu(t0, t0, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(t0, t0, Operand(kPointerSize));
+ __ lw(t2, MemOperand(t0, 0));
+ __ sll(t6, t5, 1);
+ __ Addu(t1, a3, Operand(t6));
+ __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t5, Operand(a2));
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+  // a2 = argument count (tagged)
+ __ bind(&runtime);
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame,
+ eq,
+ a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Get the length from the frame.
+ __ lw(a1, MemOperand(sp, 0));
+ __ Branch(&try_allocate);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a1, MemOperand(sp, 0));
+ __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, a2, Operand(at));
+
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // Try the new space allocation. Start out with computing the size
+ // of the arguments object and the elements array in words.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+ __ srl(a1, a1, kSmiTagSize);
+
+ __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ bind(&add_arguments_object);
+ __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(a1,
+ v0,
+ a2,
+ a3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
+
+ // Get the arguments boilerplate from the current (global) context.
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ __ lw(t0, MemOperand(t0, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+
+ // Copy the JS object part.
+ __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+
+ Label done;
+ __ Branch(&done, eq, a1, Operand(zero_reg));
+
+ // Get the parameters pointer from the stack.
+ __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+ __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // Untag the length for the loop.
+ __ srl(a1, a1, kSmiTagSize);
+
+ // Copy the fixed array slots.
+ Label loop;
+ // Setup t0 to point to the first array slot.
+ __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement in order to skip receiver.
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ lw(a3, MemOperand(a2));
+ // Post-increment t0 with kPointerSize on each iteration.
+ __ sw(a3, MemOperand(t0));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  // Just jump directly to runtime if native RegExp is not selected at compile
+  // time, or if the regexp entry in generated code has been turned off by the
+  // runtime switch when this stub is compiled.
+#ifdef V8_INTERPRETED_REGEXP
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else // V8_INTERPRETED_REGEXP
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // sp[0]: last_match_info (expected JSArray)
+ // sp[4]: previous index
+ // sp[8]: subject string
+ // sp[12]: JSRegExp object
+
+ static const int kLastMatchInfoOffset = 0 * kPointerSize;
+ static const int kPreviousIndexOffset = 1 * kPointerSize;
+ static const int kSubjectOffset = 2 * kPointerSize;
+ static const int kJSRegExpOffset = 3 * kPointerSize;
+
+ Label runtime, invoke_regexp;
+
+ // Allocation of registers for this function. These are in callee save
+ // registers and will be preserved by the call to the native RegExp code, as
+ // this code is called using the normal C calling convention. When calling
+ // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+ // MIPS - using s0..s2, since we are not using CEntry Stub.
+ Register subject = s0;
+ Register regexp_data = s1;
+ Register last_match_info_elements = s2;
+
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address(
+ masm->isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ __ li(a0, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a0, MemOperand(a0, 0));
+ __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+ // Check that the first argument is a JSRegExp object.
+ __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+ if (FLAG_debug_code) {
+ __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ Check(nz,
+ "Unexpected type for RegExp data, FixedArray expected",
+ t0,
+ Operand(zero_reg));
+ __ GetObjectType(regexp_data, a0, a0);
+ __ Check(eq,
+ "Unexpected type for RegExp data, FixedArray expected",
+ a0,
+ Operand(FIXED_ARRAY_TYPE));
+ }
+
+ // regexp_data: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+ __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+ // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector
+  // buffer.
+ __ lw(a2,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a2, a2, Operand(2)); // a2 was a smi.
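+  // E.g. (illustration only): 3 captures arrive as the smi 3, i.e. the raw
+  // value 6 in a2; adding 2 yields 8, which is (3 + 1) * 2 capture registers.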
+ // Check that the static offsets vector buffer is large enough.
+ __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+ // a2: Number of capture registers
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the second argument is a string.
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ GetObjectType(subject, a0, a0);
+ __ And(a0, a0, Operand(kIsNotStringMask));
+ STATIC_ASSERT(kStringTag == 0);
+ __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+  // Get the length of the string into a3.
+ __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+ // a2: Number of capture registers
+ // a3: Length of subject string as a smi
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the third argument is a positive smi less than the subject
+ // string length. A negative value will be greater (unsigned comparison).
+ __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+ __ And(at, a0, Operand(kSmiTagMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ Branch(&runtime, ls, a3, Operand(a0));
+
+ // a2: Number of capture registers
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check that the fourth object is a JSArray object.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ Branch(&runtime, ne, a0, Operand(
+ masm->isolate()->factory()->fixed_array_map()));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
+ __ Branch(&runtime, gt, a2, Operand(at));
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string.
+ Label seq_string;
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // First check for flat string.
+ __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+ __ Branch(&seq_string, eq, at, Operand(zero_reg));
+
+ // subject: Subject string
+  // a0: instance type of subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Check for flat cons string.
+ // A flat cons string is a cons string where the second part is the empty
+ // string. In that case the subject string is just the first part of the cons
+ // string. Also in this case the first part of the cons string is known to be
+ // a sequential string or an external string.
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+ __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+ __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(a1));
+ __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ // Is first part a flat string?
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, a0, Operand(kStringRepresentationMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ __ bind(&seq_string);
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // a0: Instance type of subject string
+ STATIC_ASSERT(kStringEncodingMask == 4);
+ STATIC_ASSERT(kAsciiStringTag == 4);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Find the code object based on the assumptions above.
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
+ __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
+ __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ movz(t9, t0, a0);  // If UC16 (a0 is 0), replace t9 with UC16 code (t0).
+
+ // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+ // contains the hole.
+ __ GetObjectType(t9, a0, a0);
+ __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
+ // a1: previous index
+ // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+ // t9: code
+ // subject: Subject string
+ // regexp_data: RegExp data (FixedArray)
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+ 1, a0, a2);
+
+ // Isolates: note we add an additional parameter here (isolate pointer).
+ static const int kRegExpExecuteArguments = 8;
+ static const int kParameterRegisters = 4;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers, meaning we
+ // treat the return address as argument 5. Thus every argument after that
+ // needs to be shifted back by 1. Since DirectCEntryStub will handle
+ // allocating space for the c argument slots, we don't need to calculate
+ // that into the argument positions on the stack. This is how the stack will
+ // look (sp meaning the value of sp at this moment):
+  // [sp + 4 * kPointerSize] - Argument 8
+  // [sp + 3 * kPointerSize] - Argument 7
+  // [sp + 2 * kPointerSize] - Argument 6
+  // [sp + 1 * kPointerSize] - Argument 5
+  // [sp + 0]                - saved ra
+
+ // Argument 8: Pass current isolate address.
+ // CFunctionArgumentOperand handles MIPS stack argument slots.
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ __ li(a0, Operand(1));
+ __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ lw(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ lw(a2, MemOperand(a2, 0));
+ __ addu(a0, a0, a2);
+ __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+ // Argument 5: static offsets vector buffer.
+ __ li(a0, Operand(
+ ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+ __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+ // For arguments 4 and 3 get string length, calculate start of string data
+ // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+ __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
+ __ sra(a0, a0, kSmiTagSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
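+  // Illustrative note: a3 is now the index shift, 0 for ASCII and 1 for
+  // two-byte, so the sllv instructions below scale the start index and the
+  // length by the character size.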
+ // Argument 4 (a3): End of string data
+ // Argument 3 (a2): Start of string data
+ __ sllv(t1, a1, a3);
+ __ addu(a2, t0, t1);
+ __ sllv(t1, a0, a3);
+ __ addu(a3, t0, t1);
+
+ // Argument 2 (a1): Previous index.
+ // Already there
+
+ // Argument 1 (a0): Subject string.
+ __ mov(a0, subject);
+
+ // Locate the code entry and call it.
+ __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, t9);
+
+ __ LeaveExitFrame(false, no_reg);
+
+ // v0: result
+ // subject: subject string (callee saved)
+ // regexp_data: RegExp data (callee saved)
+ // last_match_info_elements: Last match info elements (callee saved)
+
+ // Check the result.
+
+ Label success;
+ __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+ Label failure;
+ __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  // If it is not an exception, it can only be a retry. Handle that in the
+  // runtime system.
+ __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code, but
+  // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ __ li(a1, Operand(
+ ExternalReference::the_hole_value_location(masm->isolate())));
+ __ lw(a1, MemOperand(a1, 0));
+ __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ masm->isolate())));
+ __ lw(v0, MemOperand(a2, 0));
+ __ Branch(&runtime, eq, v0, Operand(a1));
+
+ __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+ Label termination_exception;
+ __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+  __ Throw(v0);  // Expects thrown value in v0.
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, v0); // Expects thrown value in v0.
+
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Process the result from the native regexp code.
+ __ bind(&success);
+ __ lw(a1,
+ FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+ __ Addu(a1, a1, Operand(2)); // a1 was a smi.
+
+ // a1: number of capture registers
+ // subject: subject string
+ // Store the capture count.
+ __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
+ __ sw(a2, FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastCaptureCountOffset));
+ // Store last subject and last input.
+ __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset));
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+ __ sw(subject,
+ FieldMemOperand(last_match_info_elements,
+ RegExpImpl::kLastInputOffset));
+ __ mov(a3, last_match_info_elements);
+ __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ __ li(a2, Operand(address_of_static_offsets_vector));
+
+ // a1: number of capture registers
+ // a2: offsets vector
+ Label next_capture, done;
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ Addu(a0,
+ last_match_info_elements,
+ Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ bind(&next_capture);
+ __ Subu(a1, a1, Operand(1));
+ __ Branch(&done, lt, a1, Operand(zero_reg));
+ // Read the value from the static offsets vector buffer.
+ __ lw(a3, MemOperand(a2, 0));
+ __ addiu(a2, a2, kPointerSize);
+ // Store the smi value in the last match info.
+ __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
+ __ sw(a3, MemOperand(a0, 0));
+ __ Branch(&next_capture, USE_DELAY_SLOT);
+ __ addiu(a0, a0, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+
+ // Return last match info.
+ __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ Addu(sp, sp, Operand(4 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif // V8_INTERPRETED_REGEXP
+}
+
+
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+ const int kMaxInlineLength = 100;
+ Label slowcase;
+ Label done;
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ JumpIfNotSmi(a1, &slowcase);
+ __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+ // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray with size (in words) in a2.
+ // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
+ // Elements: [Map][Length][..elements..]
+ // Size of JSArray with two in-object properties and the header of a
+ // FixedArray.
+ int objects_size =
+ (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
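+  // Illustration (not part of the original comment): a result with three
+  // elements allocates objects_size + 3 words in the single AllocateInNewSpace
+  // call below (SIZE_IN_WORDS).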
+ __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+ __ Addu(a2, t1, Operand(objects_size));
+ __ AllocateInNewSpace(
+ a2, // In: Size, in words.
+ v0, // Out: Start of allocation (tagged).
+ a3, // Scratch register.
+ t0, // Scratch register.
+ &slowcase,
+ static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ // v0: Start of allocated area, object-tagged.
+ // a1: Number of elements in array, as smi.
+ // t1: Number of elements, untagged.
+
+ // Set JSArray map to global.regexp_result_map().
+ // Set empty properties FixedArray.
+ // Set elements to point to FixedArray allocated right after the JSArray.
+ // Interleave operations for better latency.
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+ __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+ // Set input, index and length fields from arguments.
+ __ lw(a1, MemOperand(sp, kPointerSize * 0));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 1));
+ __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+ __ lw(a1, MemOperand(sp, kPointerSize * 2));
+ __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+ // Fill out the elements FixedArray.
+ // v0: JSArray, tagged.
+ // a3: FixedArray, tagged.
+ // t1: Number of elements in array, untagged.
+
+ // Set map.
+ __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+ // Set FixedArray length.
+ __ sll(t2, t1, kSmiTagSize);
+ __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ // Fill contents of fixed-array with the-hole.
+ __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // Fill fixed array elements with hole.
+ // v0: JSArray, tagged.
+ // a2: the hole.
+ // a3: Start of elements in FixedArray.
+ // t1: Number of elements to fill.
+ Label loop;
+ __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
+ __ addu(t1, t1, a3); // Point past last element to store.
+ __ bind(&loop);
+  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 reaches the end.
+ __ sw(a2, MemOperand(a3));
+ __ Branch(&loop, USE_DELAY_SLOT);
+ __ addiu(a3, a3, kPointerSize); // In branch delay slot.
+
+ __ bind(&done);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slowcase);
+ __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // The receiver might implicitly be the global object. This is
+ // indicated by passing the hole as the receiver to the call
+ // function stub.
+ if (ReceiverMightBeImplicit()) {
+ Label call;
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
+ // Call as function is indicated with the hole.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&call, ne, t0, Operand(at));
+ // Patch the receiver on the stack with the global receiver object.
+ __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ bind(&call);
+ }
+
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &slow);
+ // Get the map of the function object.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Fast-case: Invoke the function now.
+ // a1: pushed function
+ ParameterCount actual(argc_);
+
+ if (ReceiverMightBeImplicit()) {
+ Label call_as_function;
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&call_as_function, eq, t0, Operand(at));
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
+ __ bind(&call_as_function);
+ }
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+ // of the original receiver from the call site).
+ __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ li(a0, Operand(argc_)); // Setup the number of arguments.
+ __ mov(a2, zero_reg);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+
+ const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
+ const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
+
+ const char* strict_name = "";
+ if (strict_ && (cc_ == eq || cc_ == ne)) {
+ strict_name = "_STRICT";
+ }
+
+ const char* never_nan_nan_name = "";
+ if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+ never_nan_nan_name = "_NO_NAN";
+ }
+
+ const char* include_number_compare_name = "";
+ if (!include_number_compare_) {
+ include_number_compare_name = "_NO_NUMBER";
+ }
+
+ const char* include_smi_compare_name = "";
+ if (!include_smi_compare_) {
+ include_smi_compare_name = "_NO_SMI";
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "CompareStub_%s%s%s%s%s%s",
+ cc_name,
+ lhs_name,
+ rhs_name,
+ strict_name,
+ never_nan_nan_name,
+ include_number_compare_name,
+ include_smi_compare_name);
+ return name_;
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(lhs_.is(a0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
+// StringCharCodeAtGenerator.
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ ASSERT(!t0.is(scratch_));
+ ASSERT(!t0.is(index_));
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(object_));
+
+ // If the receiver is a smi trigger the non-string case.
+ __ JumpIfSmi(object_, receiver_not_string_);
+
+ // Fetch the instance type of the receiver into result register.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the non-string case.
+ __ And(t0, result_, Operand(kIsNotStringMask));
+ __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+ // If the index is non-smi trigger the non-smi case.
+ __ JumpIfNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
+
+ // Check for index out of range.
+ __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+ __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+ // Handle non-flat strings.
+ __ And(t0, result_, Operand(kIsConsStringMask));
+ __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+ // Get the first of the two strings and load its instance type.
+ __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+
+ __ And(t0, result_, Operand(kStringRepresentationMask));
+ __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ And(t0, result_, Operand(kStringEncodingMask));
+ __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ Addu(scratch_, object_, Operand(scratch_));
+ __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ Branch(&got_char_code);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+
+ __ srl(t0, scratch_, kSmiTagSize);
+ __ Addu(scratch_, object_, t0);
+
+ __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+ __ bind(&got_char_code);
+ __ sll(result_, result_, kSmiTagSize);
+ __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_,
+ scratch_,
+ Heap::kHeapNumberMapRootIndex,
+ index_not_number_,
+ DONT_DO_SMI_CHECK);
+ call_helper.BeforeCall(masm);
+ // Consumed by runtime conversion function:
+ __ Push(object_, index_, index_);
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+
+ __ Move(scratch_, v0);
+
+ __ pop(index_);
+ __ pop(object_);
+ // Reload the instance type.
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ Branch(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+ ASSERT(!t0.is(result_));
+ ASSERT(!t0.is(code_));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ __ And(t0,
+ code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
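+  // Illustrative note: the mask above keeps the smi tag bit and every bit
+  // above the ASCII char code range, so this single test rejects both
+  // non-smis and out-of-range character codes.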
+
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point code register contains smi tagged ASCII char code.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(result_, result_, t0);
+ __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case_, eq, result_, Operand(t0));
+ __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ Move(result_, v0);
+
+ call_helper.AfterCall(masm);
+ __ Branch(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+  // Probe the symbol table for a two character string. If the string is not
+  // found by probing, a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found, the code falls through with the string in register v0.
+  // The contents of both the c1 and c2 registers are modified. At the exit,
+  // c1 is guaranteed to contain a halfword with its low and high bytes equal
+  // to the initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ Label done;
+ // This loop just copies one character at a time, as it is only used for
+ // very short strings.
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+  __ addu(count, dest, count);  // Count now points just past the last byte.
+
+ __ bind(&loop);
+ __ lbu(scratch, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&loop, lt, dest, Operand(count));
+
+ __ bind(&done);
+}
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ bool ascii = (flags & COPY_ASCII) != 0;
+ bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+ if (dest_always_aligned && FLAG_debug_code) {
+ // Check that destination is actually word aligned if the flag says
+ // that it is.
+ __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+ __ Check(eq,
+ "Destination of copy not aligned.",
+ scratch4,
+ Operand(zero_reg));
+ }
+
+ const int kReadAlignment = 4;
+ const int kReadAlignmentMask = kReadAlignment - 1;
+ // Ensure that reading an entire aligned word containing the last character
+ // of a string will not read outside the allocated area (because we pad up
+ // to kObjectAlignment).
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+ // Assumes word reads and writes are little endian.
+ // Nothing to do for zero characters.
+ Label done;
+
+ if (!ascii) {
+ __ addu(count, count, count);
+ }
+ __ Branch(&done, eq, count, Operand(zero_reg));
+
+ Label byte_loop;
+ // Must copy at least eight bytes, otherwise just do it one byte at a time.
+ __ Subu(scratch1, count, Operand(8));
+ __ Addu(count, dest, Operand(count));
+ Register limit = count; // Read until src equals this.
+ __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+ if (!dest_always_aligned) {
+ // Align dest by byte copying. Copies between zero and three bytes.
+ __ And(scratch4, dest, Operand(kReadAlignmentMask));
+ Label dest_aligned;
+ __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+ Label aligned_loop;
+ __ bind(&aligned_loop);
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ addiu(scratch4, scratch4, 1);
+ __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+ __ bind(&dest_aligned);
+ }
+
+ Label simple_loop;
+
+ __ And(scratch4, src, Operand(kReadAlignmentMask));
+ __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+ // Loop for src/dst that are not aligned the same way.
+ // This loop uses lwl and lwr instructions. These instructions
+ // depend on the endianness, and the implementation assumes little-endian.
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lwr(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ lwl(scratch1, MemOperand(src, -1));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ __ Branch(&byte_loop);
+
+ // Simple loop.
+ // Copy words from src to dest, until less than four bytes left.
+ // Both src and dest are word aligned.
+ __ bind(&simple_loop);
+ {
+ Label loop;
+ __ bind(&loop);
+ __ lw(scratch1, MemOperand(src));
+ __ Addu(src, src, Operand(kReadAlignment));
+ __ sw(scratch1, MemOperand(dest));
+ __ Addu(dest, dest, Operand(kReadAlignment));
+ __ Subu(scratch2, limit, dest);
+ __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+ }
+
+ // Copy bytes from src to dest until dest hits limit.
+ __ bind(&byte_loop);
+ // Test if dest has already reached the limit.
+ __ Branch(&done, ge, dest, Operand(limit));
+ __ lbu(scratch1, MemOperand(src));
+ __ addiu(src, src, 1);
+ __ sb(scratch1, MemOperand(dest));
+ __ addiu(dest, dest, 1);
+ __ Branch(&byte_loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+ __ Branch(&not_array_index,
+ Ugreater,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+ __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+ // If check failed combine both characters into single halfword.
+ // This is required by the contract of the method: code at the
+ // not_found branch expects this combination in c1 register.
+ Label tmp;
+ __ sll(scratch1, c2, kBitsPerByte);
+ __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+ __ Or(c1, c1, scratch1);
+ __ bind(&tmp);
+ __ Branch(not_found,
+ Uless_equal,
+ scratch,
+ Operand(static_cast<int>('9' - '0')));
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
+ Register hash = scratch1;
+ StringHelper::GenerateHashInit(masm, hash, c1);
+ StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+ StringHelper::GenerateHashGetHash(masm, hash);
+
+ // Collect the two characters in a register.
+ Register chars = c1;
+ __ sll(scratch, c2, kBitsPerByte);
+ __ Or(chars, chars, scratch);
+
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string.
+
+ // Load symbol table.
+ // Load address of first element of the symbol table.
+ Register symbol_table = c2;
+ __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+ Register undefined = scratch4;
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ // Calculate capacity mask from the symbol table capacity.
+ Register mask = scratch2;
+ __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ sra(mask, mask, 1);
+ __ Addu(mask, mask, -1);
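+  // E.g. (illustration only): a capacity of 64 arrives as the smi 64, i.e.
+  // the raw value 128; the sra by 1 gives 64 and the mask becomes 63.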
+
+ // Calculate untagged address of the first element of the symbol table.
+ Register first_symbol_table_element = symbol_table;
+ __ Addu(first_symbol_table_element, symbol_table,
+ Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+ // Registers.
+ // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+ // hash: hash of two character string
+ // mask: capacity mask
+ // first_symbol_table_element: address of the first element of
+ // the symbol table
+ // undefined: the undefined object
+ // scratch: -
+
+ // Perform a number of probes in the symbol table.
+ static const int kProbes = 4;
+ Label found_in_symbol_table;
+ Label next_probe[kProbes];
+ Register candidate = scratch5; // Scratch register contains candidate.
+ for (int i = 0; i < kProbes; i++) {
+ // Calculate entry in symbol table.
+ if (i > 0) {
+ __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ } else {
+ __ mov(candidate, hash);
+ }
+
+ __ And(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ __ sll(scratch, candidate, kPointerSizeLog2);
+ __ Addu(scratch, scratch, first_symbol_table_element);
+ __ lw(candidate, MemOperand(scratch));
+
+ // If entry is undefined no string with this hash can be found.
+ Label is_string;
+ __ GetObjectType(candidate, scratch, scratch);
+ __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+ __ Branch(not_found, eq, undefined, Operand(candidate));
+ // Must be null (deleted entry).
+ if (FLAG_debug_code) {
+ __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+ __ Assert(eq, "oddball in symbol table is not undefined or null",
+ scratch, Operand(candidate));
+ }
+ __ jmp(&next_probe[i]);
+
+ __ bind(&is_string);
+
+ // Check that the candidate is a non-external ASCII string. The instance
+ // type is still in the scratch register from the CompareObjectType
+ // operation.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+ // If length is not 2 the string is not a candidate.
+ __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+ __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+ // Check if the two characters match.
+ // Assumes that word load is little endian.
+ __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ bind(&next_probe[i]);
+ }
+
+ // No matching 2 character string found by probing.
+ __ jmp(not_found);
+
+ // Scratch register contains result when we fall through to here.
+ Register result = candidate;
+ __ bind(&found_in_symbol_table);
+ __ mov(v0, result);
+}
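+// Rough C sketch of the probe sequence generated above (illustrative only;
+// probe_offset, is_seq_ascii and chars_of are placeholder names):
+//   for (int i = 0; i < kProbes; i++) {
+//     int entry = (hash + probe_offset(i)) & mask;
+//     Object* candidate = table[entry];
+//     if (candidate == undefined) goto not_found;
+//     if (candidate == null) continue;    // deleted entry
+//     if (is_seq_ascii(candidate) && length(candidate) == 2 &&
+//         chars_of(candidate) == chars) { v0 = candidate; return; }
+//   }
+//   goto not_found;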
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash = character + (character << 10);
+ __ sll(hash, character, 10);
+ __ addu(hash, hash, character);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ // hash += character;
+ __ addu(hash, hash, character);
+ // hash += hash << 10;
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 6;
+ __ sra(at, hash, 6);
+ __ xor_(hash, hash, at);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ // hash += hash << 3;
+ __ sll(at, hash, 3);
+ __ addu(hash, hash, at);
+ // hash ^= hash >> 11;
+ __ sra(at, hash, 11);
+ __ xor_(hash, hash, at);
+ // hash += hash << 15;
+ __ sll(at, hash, 15);
+ __ addu(hash, hash, at);
+
+ // if (hash == 0) hash = 27;
+ __ ori(at, zero_reg, 27);
+ __ movz(hash, at, hash);
+}
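+// Taken together, the three helpers above compute roughly (illustrative C
+// sketch of the generated code for a two-character string c1, c2; note the
+// generated shifts are arithmetic):
+//   uint32_t hash = c1 + (c1 << 10);  hash ^= hash >> 6;            // Init
+//   hash += c2;  hash += hash << 10;  hash ^= hash >> 6;            // AddCharacter
+//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;    // GetHash
+//   if (hash == 0) hash = 27;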
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label sub_string_runtime;
+ // Stack frame on entry.
+ // ra: return address
+ // sp[0]: to
+ // sp[4]: from
+ // sp[8]: string
+
+ // This stub is called from the native-call %_SubString(...), so
+ // nothing can be assumed about the arguments. It is tested that:
+ // "string" is a sequential string,
+ // both "from" and "to" are smis, and
+ // 0 <= from <= to <= string.length.
+ // If any of these assumptions fail, we call the runtime system.
+
+ static const int kToOffset = 0 * kPointerSize;
+ static const int kFromOffset = 1 * kPointerSize;
+ static const int kStringOffset = 2 * kPointerSize;
+
+ Register to = t2;
+ Register from = t3;
+
+ // Check bounds and smi-ness.
+ __ lw(to, MemOperand(sp, kToOffset));
+ __ lw(from, MemOperand(sp, kFromOffset));
+ STATIC_ASSERT(kFromOffset == kToOffset + 4);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+ __ JumpIfNotSmi(from, &sub_string_runtime);
+ __ JumpIfNotSmi(to, &sub_string_runtime);
+
+ __ sra(a3, from, kSmiTagSize); // Remove smi tag.
+ __ sra(t5, to, kSmiTagSize); // Remove smi tag.
+
+ // a3: from index (untagged smi)
+ // t5: to index (untagged smi)
+
+ __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
+
+ __ subu(a2, t5, a3);
+ __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
+
+ // Special handling of sub-strings of length 1 and 2. One character strings
+ // are handled in the runtime system (looked up in the single character
+ // cache). Two character strings are looked for in the symbol cache.
+ __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+ // Both to and from are smis.
+
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Make sure first argument is a sequential (or flat) string.
+ __ lw(t1, MemOperand(sp, kStringOffset));
+ __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask));
+
+ __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ And(t4, a1, Operand(kIsNotStringMask));
+
+ __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ Label seq_string;
+ __ And(t0, a1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+
+ // External strings go to runtime.
+ __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+ // Sequential strings are handled directly.
+ __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+ // Cons string. Try to recurse (once) on the first substring.
+ // (This adds a little more generality than necessary to handle flattened
+ // cons strings, but not much).
+ __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
+ __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSeqStringTag == 0);
+ // Cons and External strings go to runtime.
+ __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+  // Definitely a sequential string.
+ __ bind(&seq_string);
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t2: (a.k.a. to): to (smi)
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
+ __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
+ to = no_reg;
+
+ // a1: instance type
+ // a2: result string length
+ // a3: from index (untagged smi)
+ // t1: string
+ // t3: (a.k.a. from): from offset (smi)
+ // t5: to index (untagged smi)
+
+ // Check for flat ASCII string.
+ Label non_ascii_flat;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+
+ __ And(t4, a1, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+ Label result_longer_than_two;
+ __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+ // Sub string of length 2 requested.
+ // Get the two characters forming the sub string.
+ __ Addu(t1, t1, Operand(a3));
+ __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
+ __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
+
+ // Try to lookup two character string in symbol table.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+
+ // a2: result string length.
+ // a3: two characters combined into halfword in little endian byte order.
+ __ bind(&make_two_character_string);
+ __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+ __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&result_longer_than_two);
+
+ // Allocate the result.
+ __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // a3: from index (untagged smi)
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t1, t1, Operand(a3));
+
+ // v0: result string.
+ // a1: first character of result string.
+ // a2: result string length.
+ // t1: first character of sub string to copy.
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_flat);
+ // a2: result string length.
+ // t1: string.
+ // t3: (a.k.a. from): from offset (smi)
+ // Check for flat two byte string.
+
+ // Allocate the result.
+ __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+ // v0: result string.
+ // a2: result string length.
+ // t1: string.
+ // Locate first character of result.
+ __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate 'from' character of string.
+ __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // As "from" is a smi it is 2 times the value which matches the size of a two
+ // byte character.
+ __ Addu(t1, t1, Operand(from));
+ from = no_reg;
+
+ // v0: result string.
+ // a1: first character of result.
+ // a2: result length.
+ // t1: first character of string to copy.
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ StringHelper::GenerateCopyCharactersLong(
+ masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+ __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&sub_string_runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
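+// Summary of the guards above (illustrative sketch):
+//   if (!IsSmi(from) || !IsSmi(to)) goto runtime;
+//   if (from < 0 || from > to || to > string.length) goto runtime;
+//   if (to - from < 2) goto runtime;  // single characters use the runtime cache
+//   // length == 2 probes the symbol table; longer substrings allocate and copy.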
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Register length = scratch1;
+
+ // Compare lengths.
+ Label strings_not_equal, check_zero_length;
+ __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+ __ bind(&strings_not_equal);
+ __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+ __ Ret();
+
+ // Check if the length is zero.
+ Label compare_chars;
+ __ bind(&check_zero_length);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ // Compare characters.
+ __ bind(&compare_chars);
+
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, length, scratch2, scratch3, v0,
+ &strings_not_equal);
+
+ // Characters are equal.
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label result_not_equal, compare_lengths;
+ // Find minimum length and length difference.
+ __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Subu(scratch3, scratch1, Operand(scratch2));
+ Register length_delta = scratch3;
+ __ slt(scratch4, scratch2, scratch1);
+ __ movn(scratch1, scratch2, scratch4);
+ Register min_length = scratch1;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+ // Compare loop.
+ GenerateAsciiCharsCompareLoop(masm,
+ left, right, min_length, scratch2, scratch4, v0,
+ &result_not_equal);
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // Use length_delta as result if it's zero.
+ __ mov(scratch2, length_delta);
+ __ mov(scratch4, zero_reg);
+ __ mov(v0, zero_reg);
+
+ __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // on the last comparison performed in the loop above.
+ Label ret;
+ __ Branch(&ret, eq, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(GREATER)));
+ __ Branch(&ret, gt, scratch2, Operand(scratch4));
+ __ li(v0, Operand(Smi::FromInt(LESS)));
+ __ bind(&ret);
+ __ Ret();
+}
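+// Net effect of the comparison above (illustrative C sketch):
+//   int r = memcmp(left_chars, right_chars, min(left_len, right_len));
+//   if (r == 0) r = left_len - right_len;
+//   v0 = Smi(r < 0 ? LESS : (r > 0 ? GREATER : EQUAL));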
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+ MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal) {
+ // Change index to run from -length to -1 by adding length to string
+ // start. This means that loop ends when index reaches zero, which
+ // doesn't need an additional compare.
+ __ SmiUntag(length);
+ __ Addu(scratch1, length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(left, left, Operand(scratch1));
+ __ Addu(right, right, Operand(scratch1));
+ __ Subu(length, zero_reg, length);
+ Register index = length; // index = -length;
+
+
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ __ Addu(scratch3, left, index);
+ __ lbu(scratch1, MemOperand(scratch3));
+ __ Addu(scratch3, right, index);
+ __ lbu(scratch2, MemOperand(scratch3));
+ __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+ __ Addu(index, index, 1);
+ __ Branch(&loop, ne, index, Operand(zero_reg));
+}
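+// The loop above is, in effect (illustrative sketch):
+//   for (int i = -length; i != 0; i++)
+//     if (left_end[i] != right_end[i]) goto chars_not_equal;
+// where left_end/right_end point just past the last character of each string.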
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack frame on entry.
+ // sp[0]: right string
+ // sp[4]: left string
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
+ __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+
+ Label not_same;
+ __ Branch(&not_same, ne, a0, Operand(a1));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+ // Compare flat ASCII strings natively. Remove arguments from stack first.
+ __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
+ Counters* counters = masm->isolate()->counters();
+
+ // Stack on entry:
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
+
+ // Load the two arguments.
+ __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (flags_ == NO_STRING_ADD_FLAGS) {
+ __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+ // Load instance types.
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kStringTag == 0);
+ // If either is not a string, go to runtime.
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kIsNotStringMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
+ }
+
+ // Both arguments are strings.
+ // a0: first string
+ // a1: second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ {
+ Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+    // These tests use a zero-length check on the string length, which is a Smi.
+ // Assert that Smi::FromInt(0) is really 0.
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT(Smi::FromInt(0) == 0);
+ __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+ __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+ __ mov(v0, a0); // Assume we'll return first string (from a0).
+ __ movz(v0, a1, a2); // If first is empty, return second (from a1).
+ __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
+ __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
+ __ and_(t4, t4, t5); // Branch if both strings were non-empty.
+ __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&strings_not_empty);
+ }
+
+ // Untag both string-lengths.
+ __ sra(a2, a2, kSmiTagSize);
+ __ sra(a3, a3, kSmiTagSize);
+
+ // Both strings are non-empty.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result, longer_than_two;
+ // Adding two lengths can't overflow.
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+ __ Addu(t2, a2, Operand(a3));
+  // Use the symbol table when adding two one-character strings, as it
+  // helps later optimizations to return a symbol here.
+ __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+ // Check that both strings are non-external ASCII strings.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+ &string_add_runtime);
+
+ // Get the two characters forming the sub string.
+ __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+ // Try to lookup two character string in symbol table. If it is not found
+ // just allocate a new one.
+ Label make_two_character_string;
+ StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&make_two_character_string);
+  // The resulting string has length 2, and the first characters of the two
+  // strings are combined into a single halfword in the a2 register.
+  // So we can fill the resulting string with a single halfword store
+  // instruction (which assumes the processor is in little-endian mode)
+  // instead of two loops.
+ __ li(t2, Operand(2));
+ __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&longer_than_two);
+ // Check if resulting string will be flat.
+ __ Branch(&string_add_flat_result, lt, t2,
+ Operand(String::kMinNonFlatLength));
+ // Handle exceptionally long strings in the runtime system.
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+ ASSERT(IsPowerOf2(String::kMaxLength + 1));
+ // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
+ __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+ // If result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ASCII the result is an ASCII cons string.
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ Label non_ascii, allocated, ascii_data;
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+ __ And(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+ // Allocate an ASCII cons string.
+ __ bind(&ascii_data);
+ __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+ __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ASCII characters.
+ // t0: first instance type.
+ // t1: second instance type.
+  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
+ __ And(at, t0, Operand(kAsciiDataHintMask));
+ __ and_(at, at, t1);
+ __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+ __ xor_(t0, t0, t1);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+ // Allocate a two byte cons string.
+ __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ Branch(&allocated);
+
+ // Handle creating a flat result. First check that both strings are
+ // sequential and that they have the same encoding.
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // t2: sum of lengths.
+ __ bind(&string_add_flat_result);
+ if (flags_ != NO_STRING_ADD_FLAGS) {
+ __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ }
+ // Check that both strings are sequential, meaning that we
+ // branch to runtime if either string tag is non-zero.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ Or(t4, t0, Operand(t1));
+ __ And(t4, t4, Operand(kStringRepresentationMask));
+ __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+ // Now check if both strings have the same encoding (ASCII/Two-byte).
+ // a0: first string
+ // a1: second string
+ // a2: length of first string
+ // a3: length of second string
+ // t0: first string instance type
+ // t1: second string instance type
+ // t2: sum of lengths.
+ Label non_ascii_string_add_flat_result;
+ ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+ __ xor_(t3, t1, t0);
+ __ And(t3, t3, Operand(kStringEncodingMask));
+ __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+ // And see if it's ASCII (0) or two-byte (1).
+ __ And(t3, t0, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+ // Both strings are sequential ASCII strings. We also know that they are
+ // short (since the sum of the lengths is less than kMinNonFlatLength).
+ // t2: length of resulting flat string
+ __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+ // Load second argument and locate first character.
+ __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&non_ascii_string_add_flat_result);
+ // Both strings are sequential two byte strings.
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: sum of length of strings.
+ __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+ // a0: first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t3: result string.
+
+ // Locate first character of result.
+ __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a0: first character of first string.
+ // a1: second string.
+ // a2: length of first string.
+ // a3: length of second string.
+ // t2: first character of result.
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+ // Locate first character of second argument.
+ __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ // a1: first character of second string.
+ // a3: length of second string.
+ // t2: next character of result (after copy of first string).
+ // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+ __ mov(v0, t3);
+ __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
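+// Fast-path structure of the stub above (illustrative sketch):
+//   if (left.length == 0 || right.length == 0) return the other string;
+//   total = left.length + right.length;
+//   if (total == 2)                      probe the symbol table, else
+//                                        allocate a two-character string;
+//   else if (total < kMinNonFlatLength)  allocate a flat string and copy;
+//   else if (total <= kMaxLength)        allocate a cons string;
+//   anything else (or non-sequential input on the flat path) -> runtime.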
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ GetObjectType(arg, scratch1, scratch1);
+ __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ sw(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
+ __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ And(scratch2, scratch2, scratch4);
+ __ Branch(slow, ne, scratch2, Operand(scratch4));
+ __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ sw(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SMIS);
+ Label miss;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &miss);
+
+ if (GetCondition() == eq) {
+ // For equality we do not care about the sign of the result.
+ __ Subu(v0, a0, a1);
+ } else {
+ // Untag before subtracting to avoid handling overflow.
+ __ SmiUntag(a1);
+ __ SmiUntag(a0);
+ __ Subu(v0, a1, a0);
+ }
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
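+// Smi fast path above, in effect (illustrative sketch):
+//   if (!IsSmi(a0) || !IsSmi(a1)) goto miss;
+//   v0 = (cond == eq) ? a0 - a1                       // sign does not matter
+//                     : SmiUntag(a1) - SmiUntag(a0);  // untag to avoid overflow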
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ Label generic_stub;
+ Label unordered;
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &generic_stub);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+ // Inlining the double comparison and falling back to the general compare
+ // stub if NaN is involved or FPU is unsupported.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ // Load left and right operand.
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+ Label fpu_eq, fpu_lt, fpu_gt;
+ // Compare operands (test if unordered).
+ __ c(UN, D, f0, f2);
+ // Don't base result on status bits when a NaN is involved.
+ __ bc1t(&unordered);
+ __ nop();
+
+ // Test if equal.
+ __ c(EQ, D, f0, f2);
+ __ bc1t(&fpu_eq);
+ __ nop();
+
+ // Test if unordered or less (unordered case is already handled).
+ __ c(ULT, D, f0, f2);
+ __ bc1t(&fpu_lt);
+ __ nop();
+
+ // Otherwise it's greater.
+ __ bc1f(&fpu_gt);
+ __ nop();
+
+ // Return a result of -1, 0, or 1.
+ __ bind(&fpu_eq);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&fpu_lt);
+ __ li(v0, Operand(LESS));
+ __ Ret();
+
+ __ bind(&fpu_gt);
+ __ li(v0, Operand(GREATER));
+ __ Ret();
+
+ __ bind(&unordered);
+ }
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ __ bind(&generic_stub);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::SYMBOLS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are symbols.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp1, tmp1, Operand(tmp2));
+ __ And(tmp1, tmp1, kIsSymbolMask);
+ __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(v0, right);
+ // Symbols are compared by identity.
+ __ Ret(ne, left, Operand(right));
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::STRINGS);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+ Register tmp3 = t0;
+ Register tmp4 = t1;
+ Register tmp5 = t2;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are strings. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ STATIC_ASSERT(kNotStringTag != 0);
+ __ Or(tmp3, tmp1, tmp2);
+ __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+ __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+ // Fast check for identical strings.
+ Label left_ne_right;
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+ __ mov(v0, zero_reg); // In the delay slot.
+ __ Ret();
+ __ bind(&left_ne_right);
+
+ // Handle not identical strings.
+
+ // Check that both strings are symbols. If they are, we're done
+ // because we already know they are not identical.
+ ASSERT(GetCondition() == eq);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ And(tmp3, tmp1, Operand(tmp2));
+ __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ Label is_symbol;
+ __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ __ Ret();
+ __ bind(&is_symbol);
+
+ // Check that both strings are sequential ASCII.
+ Label runtime;
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+ &runtime);
+
+ // Compare flat ASCII strings. Returns when done.
+ StringCompareStub::GenerateFlatAsciiStringEquals(
+ masm, left, right, tmp1, tmp2, tmp3);
+
+ // Handle more complex cases in runtime.
+ __ bind(&runtime);
+ __ Push(left, right);
+ __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::OBJECTS);
+ Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &miss);
+
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+ ASSERT(GetCondition() == eq);
+ __ Subu(v0, a0, Operand(a1));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ __ Push(a1, a0);
+ __ push(ra);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+ masm->isolate());
+ __ EnterInternalFrame();
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+ // Compute the entry point of the rewritten stub.
+ __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(ra);
+ __ pop(a0);
+ __ pop(a1);
+ __ Jump(a2);
+}
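+// Miss protocol above (illustrative sketch): inside an internal frame the
+// operands and the Smi-encoded op_ are passed to IC::kCompareIC_Miss, which
+// returns the rewritten stub; control then jumps to its first instruction:
+//   entry = v0 + Code::kHeaderSize - kHeapObjectTag;  jump(entry);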
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+ // No need to pop or drop anything, LeaveExitFrame will restore the old
+ // stack, thus dropping the allocated space for the return value.
+ // The saved ra is after the reserved stack space for the 4 args.
+ __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+ if (FLAG_debug_code && EnableSlowAsserts()) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC.
+ // Dereference the address and check for this.
+ __ lw(t0, MemOperand(t9));
+ __ Assert(ne, "Received invalid return address.", t0,
+ Operand(reinterpret_cast<uint32_t>(kZapValue)));
+ }
+ __ Jump(t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ ExternalReference function) {
+ __ li(t9, Operand(function));
+ this->GenerateCall(masm, t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ __ Move(t9, target);
+ __ AssertStackIsAligned();
+ // Allocate space for arg slots.
+ __ Subu(sp, sp, kCArgsSlotsSize);
+
+ // Block the trampoline pool through the whole function to make sure the
+ // number of generated instructions is constant.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+ // We need to get the current 'pc' value, which is not available on MIPS.
+ Label find_ra;
+ masm->bal(&find_ra); // ra = pc + 8.
+ masm->nop(); // Branch delay slot nop.
+ masm->bind(&find_ra);
+
+ const int kNumInstructionsToJump = 6;
+ masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+ // Push return address (accessible to GC through exit frame pc).
+ // This spot for ra was reserved in EnterExitFrame.
+ masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET), true);
+ // Call the function.
+ masm->Jump(t9);
+ // Make sure the stored 'ra' points to this position.
+ ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+ // not equal to the name and kProbes-th slot is not used (its name is the
+ // undefined value), it guarantees the hash table doesn't contain the
+ // property. It's true even if some slots represent deleted properties
+ // (their names are the null value).
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // scratch0 points to properties hash.
+ // Compute the masked index: (hash + i + i * i) & mask.
+ Register index = scratch0;
+ // Capacity is smi 2^n.
+ __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+ __ Subu(index, index, Operand(1));
+ __ And(index, index, Operand(
+ Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+ Register entity_name = scratch0;
+ // Having undefined at this place means the name is not contained.
+ ASSERT_EQ(kSmiTagSize, 1);
+ Register tmp = properties;
+
+ __ sll(scratch0, index, 1);
+ __ Addu(tmp, properties, scratch0);
+ __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+ ASSERT(!tmp.is(entity_name));
+ __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+ __ Branch(done, eq, entity_name, Operand(tmp));
+
+ if (i != kInlinedProbes - 1) {
+ // Stop if found the property.
+ __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+ // Check if the entry name is not a symbol.
+ __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ lbu(entity_name,
+ FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+ __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+ // Restore the properties.
+ __ lw(properties,
+ FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ }
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+ a2.bit() | a1.bit() | a0.bit());
+
+ __ MultiPush(spill_mask);
+ __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ li(a1, Operand(Handle<String>(name)));
+ StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+ MaybeObject* result = masm->TryCallStub(&stub);
+ if (result->IsFailure()) return result;
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, eq, v0, Operand(zero_reg));
+ __ Branch(miss, ne, v0, Operand(zero_reg));
+ return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register scratch1,
+ Register scratch2) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
+ // Compute the capacity mask.
+ __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+ __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
+ __ Subu(scratch1, scratch1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ for (int i = 0; i < kInlinedProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the And instruction that follows.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(scratch2, scratch2, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ }
+ __ srl(scratch2, scratch2, String::kHashShift);
+ __ And(scratch2, scratch1, scratch2);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // scratch2 = scratch2 * 3.
+
+ __ mov(at, scratch2);
+ __ sll(scratch2, scratch2, 1);
+ __ Addu(scratch2, scratch2, at);
+
+ // Check if the key is identical to the name.
+ __ sll(at, scratch2, 2);
+ __ Addu(scratch2, elements, at);
+ __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+ __ Branch(done, eq, name, Operand(at));
+ }
+
+ const int spill_mask =
+ (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+ a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+ ~(scratch1.bit() | scratch2.bit());
+
+ __ MultiPush(spill_mask);
+ __ Move(a0, elements);
+ __ Move(a1, name);
+ StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+ __ CallStub(&stub);
+ __ mov(scratch2, a2);
+ __ MultiPop(spill_mask);
+
+ __ Branch(done, ne, v0, Operand(zero_reg));
+ __ Branch(miss, eq, v0, Operand(zero_reg));
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  dictionary (a0): StringDictionary to probe.
+  //  key (a1): key to look up.
+  //  index (a2): will hold the index of the entry if the lookup succeeds.
+  // Returns:
+  //  result (v0): zero if the lookup failed, non-zero otherwise.
+
+ Register result = v0;
+ Register dictionary = a0;
+ Register key = a1;
+ Register index = a2;
+ Register mask = a3;
+ Register hash = t0;
+ Register undefined = t1;
+ Register entry_key = t2;
+
+ Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+ __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ sra(mask, mask, kSmiTagSize);
+ __ Subu(mask, mask, Operand(1));
+
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+ __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+ for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ // Capacity is smi 2^n.
+ if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the And instruction that follows.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ Addu(index, hash, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
+ } else {
+ __ mov(index, hash);
+ }
+ __ srl(index, index, String::kHashShift);
+ __ And(index, mask, index);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ // index *= 3.
+ __ mov(at, index);
+ __ sll(index, index, 1);
+ __ Addu(index, index, at);
+
+
+ ASSERT_EQ(kSmiTagSize, 1);
+ __ sll(index, index, 2);
+ __ Addu(index, index, dictionary);
+ __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+ // Having undefined at this place means the name is not contained.
+ __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+ // Stop if found the property.
+ __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+ if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+ // Check if the entry name is not a symbol.
+ __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ lbu(entry_key,
+ FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ And(result, entry_key, Operand(kIsSymbolMask));
+ __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+ }
+ }
+
+ __ bind(&maybe_in_dictionary);
+ // If we are doing negative lookup then probing failure should be
+ // treated as a lookup success. For positive lookup probing failure
+ // should be treated as lookup failure.
+ if (mode_ == POSITIVE_LOOKUP) {
+ __ mov(result, zero_reg);
+ __ Ret();
+ }
+
+ __ bind(&in_dictionary);
+ __ li(result, 1);
+ __ Ret();
+
+ __ bind(&not_in_dictionary);
+ __ mov(result, zero_reg);
+ __ Ret();
+}
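+// The generated probe loop corresponds roughly to (illustrative sketch;
+// probe_offset and is_symbol are placeholder names):
+//   for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+//     int entry = ((hash >> String::kHashShift) + probe_offset(i)) & mask;
+//     Object* k = dictionary[entry];
+//     if (k == undefined) return 0;                           // not present
+//     if (k == key) return 1;                                 // found
+//     if (mode_ == NEGATIVE_LOOKUP && !is_symbol(k)) break;   // give up
+//   }
+//   return (mode_ == POSITIVE_LOOKUP) ? 0 : 1;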
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
new file mode 100644
index 000000000..6c70bdd70
--- /dev/null
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -0,0 +1,660 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
+#define V8_MIPS_CODE_STUBS_MIPS_H_
+
+#include "ic-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ enum ArgumentType {
+ TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) { }
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+ void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_ | argument_type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
+class UnaryOpStub: public CodeStub {
+ public:
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
+ : op_(op),
+ mode_(mode),
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ UnaryOpIC::TypeInfo operand_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ UnaryOpIC::GetName(operand_type_));
+ }
+#endif
+
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+ Major MajorKey() { return UnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
+ }
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
+
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return UnaryOpIC::ToState(operand_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_unary_op_type(operand_type_);
+ }
+};
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ use_fpu_ = CpuFeatures::IsSupported(FPU);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_fpu_(FPUBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_fpu_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("BinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ BinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class FPUBits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FPUBits::encode(use_fpu_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return flags_; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compare two flat ASCII strings and returns result in v0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compares two flat ASCII strings for equality and returns result
+ // in v0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal);
+};
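
As a rough functional model of what GenerateCompareFlatAsciiStrings computes (not its MIPS register allocation), a flat ASCII comparison is a byte-wise compare of the common prefix followed by a length tie-break. The function and variable names below are illustrative only.

    #include <cstddef>
    #include <cstdio>

    // Returns a negative, zero or positive value, mirroring the three-way result
    // the stub leaves in v0.
    int CompareFlatAsciiStrings(const char* left, size_t left_length,
                                const char* right, size_t right_length) {
      size_t min_length = left_length < right_length ? left_length : right_length;
      for (size_t i = 0; i < min_length; ++i) {
        int diff = static_cast<unsigned char>(left[i]) -
                   static_cast<unsigned char>(right[i]);
        if (diff != 0) return diff;  // first differing character decides
      }
      // Equal prefixes: the shorter string compares as smaller.
      return static_cast<int>(left_length) - static_cast<int>(right_length);
    }

    int main() {
      std::printf("%d\n", CompareFlatAsciiStrings("abc", 3, "abd", 3));  // negative
      std::printf("%d\n", CompareFlatAsciiStrings("abc", 3, "ab", 2));   // positive
      std::printf("%d\n", CompareFlatAsciiStrings("abc", 3, "abc", 3));  // zero
      return 0;
    }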
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch,
+ Register scratch2)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch),
+ sign_(scratch2) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+ Register sign_;
+
+ // Minor key encoding in 16 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
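
The MinorKey() above packs three 4-bit register codes into one small integer via V8's BitField template. Below is a stand-alone sketch of that packing, using a minimal BitField stand-in and hypothetical register codes; the real template also provides masks, validity checks, and so on.

    #include <cassert>
    #include <cstdio>

    // Minimal stand-in for v8::internal::BitField: a field of `size` bits
    // starting at bit `shift`.
    template <typename T, int shift, int size>
    struct BitField {
      static int encode(T value) {
        assert(static_cast<unsigned>(value) < (1u << size));
        return static_cast<int>(value) << shift;
      }
      static T decode(int key) {
        return static_cast<T>((key >> shift) & ((1 << size) - 1));
      }
    };

    typedef BitField<int, 0, 4> IntRegisterBits;
    typedef BitField<int, 4, 4> HeapNumberRegisterBits;
    typedef BitField<int, 8, 4> ScratchRegisterBits;

    int main() {
      // Hypothetical register codes, e.g. a0 = 4, a1 = 5, t0 = 8 on MIPS.
      int key = IntRegisterBits::encode(4) |
                HeapNumberRegisterBits::encode(5) |
                ScratchRegisterBits::encode(8);
      std::printf("key = 0x%x\n", static_cast<unsigned>(key));       // 0x854
      std::printf("int reg = %d\n", IntRegisterBits::decode(key));   // 4
      return 0;
    }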
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache, the generated code falls
+  // through with the result in the result register. The object and the result
+  // register can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found with only the content of register
+  // object unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
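
A functional model of the lookup contract documented above, assuming a direct-mapped cache keyed by a simple hash of the number. The hash, the cache size, and the use of std::string are illustrative choices for this sketch, not V8's actual number-string-cache layout.

    #include <cstdint>
    #include <cstdio>
    #include <string>

    struct NumberStringCache {
      enum { kSize = 64 };  // must be a power of two
      struct Entry { bool valid; int32_t key; std::string value; };
      Entry entries[kSize];

      // Hit: "falls through" with the cached string. Miss: the caller takes
      // the not_found path and does the full number-to-string conversion.
      bool Lookup(int32_t smi_value, std::string* result) const {
        const Entry& e = entries[Hash(smi_value)];
        if (e.valid && e.key == smi_value) { *result = e.value; return true; }
        return false;
      }
      void Insert(int32_t smi_value, const std::string& str) {
        Entry& e = entries[Hash(smi_value)];
        e.valid = true; e.key = smi_value; e.value = str;
      }
      static int Hash(int32_t v) {
        return static_cast<int>(static_cast<uint32_t>(v) & (kSize - 1));
      }
    };

    int main() {
      NumberStringCache cache = {};
      cache.Insert(42, "42");
      std::string s;
      std::printf("42 -> %s\n", cache.Lookup(42, &s) ? s.c_str() : "not_found");
      std::printf("7  -> %s\n", cache.Lookup(7, &s) ? s.c_str() : "not_found");
      return 0;
    }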
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM and MIPS.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects), we need
+// to keep the code that called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+ DirectCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+ void GenerateCall(MacroAssembler* masm,
+ ExternalReference function);
+ void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+ Major MajorKey() { return DirectCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "DirectCEntryStub"; }
+};
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ enum Destination {
+ kFPURegisters,
+ kCoreRegisters
+ };
+
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination, the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 are scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination, the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 are still
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMA-262, section 9.5: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. (A C-level sketch of this
+  // conversion follows this class declaration.)
+ static void ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+ // the result either in |double_dst| or |dst2:dst1|, depending on
+ // |destination|.
+ // Warning: The value in |int_scratch| will be changed in the process!
+ static void ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch);
+
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+ // scratch3 is not used when FPU is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+  // Generate non-FPU code to check if a double can be exactly represented by a
+ // 32-bit integer. This does not check for 0 or -0, which need
+ // to be checked for separately.
+ // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+ // through otherwise.
+  // src1 and src2 will be clobbered.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+ // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when FPU is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in v0.
+  // Register heap_number_result must be a heap number in which the
+ // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
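
The conversions this helper declares can be summarized in portable C++, operating on plain doubles rather than tagged values and MIPS registers. This is only a behavioral sketch under that assumption: ToInt32() models the ECMA-262 section 9.5 truncation used by ConvertNumberToInt32/LoadNumberAsInt32, and IsExact32BitInteger() models the not_int32 test behind LoadNumberAsInt32Double/DoubleIs32BitInteger; the names are mine, and 0 vs. -0 is ignored here, as noted above.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ECMA-262 9.5: truncate toward zero, then wrap modulo 2^32 into the
    // signed 32-bit range. NaN and infinities become 0.
    int32_t ToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double truncated = std::trunc(value);
      double wrapped = std::fmod(truncated, 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      uint32_t bits = static_cast<uint32_t>(wrapped);        // in [0, 2^32)
      return static_cast<int32_t>(bits);                     // two's-complement wrap
    }

    // The value is an "exact int32" when it is finite, integral and inside
    // [-2^31, 2^31 - 1]; otherwise the stub jumps to not_int32.
    bool IsExact32BitInteger(double value) {
      return std::isfinite(value) &&
             value >= -2147483648.0 && value <= 2147483647.0 &&
             value == std::trunc(value);
    }

    int main() {
      std::printf("ToInt32(3.9)   = %d\n", ToInt32(3.9));             // 3
      std::printf("ToInt32(-3.9)  = %d\n", ToInt32(-3.9));            // -3
      std::printf("ToInt32(2^31)  = %d\n", ToInt32(2147483648.0));    // -2147483648
      std::printf("exact(12.0)    = %d\n", IsExact32BitInteger(12.0)); // 1
      std::printf("exact(12.5)    = %d\n", IsExact32BitInteger(12.5)); // 0
      return 0;
    }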
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+ Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
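
An illustrative open-addressing lookup showing the shape implied by kInlinedProbes and kTotalProbes: a few probes are cheap enough to unroll inline in generated code, the rest run in a bounded loop, and hitting a hole is a definitive negative result. The hash function and probe-offset formula below are generic sketch choices, not necessarily the exact StringDictionary scheme.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Dictionary {
      std::vector<std::string> keys;  // empty string == empty slot (a "hole")
      explicit Dictionary(int capacity) : keys(capacity) {}  // power-of-two capacity

      static uint32_t Hash(const std::string& s) {
        uint32_t h = 0;
        for (char c : s) h = h * 31 + static_cast<unsigned char>(c);
        return h;
      }

      // Returns the slot index, or -1 for a miss (the negative-lookup result).
      int Lookup(const std::string& name, int total_probes) const {
        uint32_t hash = Hash(name);
        uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;
        for (int probe = 0; probe < total_probes; ++probe) {
          // The probe offset grows each round so collisions spread out.
          uint32_t index = (hash + probe * (probe + 1) / 2) & mask;
          if (keys[index].empty()) return -1;               // hole: name is absent
          if (keys[index] == name) return static_cast<int>(index);
        }
        return -1;  // gave up after total_probes; the stub would take a miss path
      }
    };

    int main() {
      Dictionary dict(16);
      dict.keys[Dictionary::Hash("length") & 15] = "length";
      std::printf("length -> slot %d\n", dict.Lookup("length", 20));
      std::printf("name   -> slot %d\n", dict.Lookup("name", 20));
      return 0;
    }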
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_CODE_STUBS_ARM_H_
diff --git a/deps/v8/src/mips/codegen-mips-inl.h b/deps/v8/src/mips/codegen-mips-inl.h
deleted file mode 100644
index 3a511b80f..000000000
--- a/deps/v8/src/mips/codegen-mips-inl.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
- __ b(&entry_label_);
- __ nop();
-}
-
-
-void Reference::GetValueAndSpill() {
- GetValue();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- VisitStatements(statements);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- Load(expression);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
-
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 79801f07b..4400b643a 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,1413 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-
+#include "codegen.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
-
-
-// -----------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-
-void DeferredCode::SaveRegisters() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredCode::RestoreRegisters() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- true_target_(NULL),
- false_target_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : owner_(owner),
- true_target_(true_target),
- false_target_(false_target),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(cc_always),
- state_(NULL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
- allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = cc_always;
-
- {
- CodeGenState state(this);
-
- // Registers:
- // a1: called JS function
- // ra: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // cp: callee's context
- //
- // Stack:
- // arguments
- // receiver
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Initialize the function return target.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- VirtualFrame::SpilledScope spilled_scope;
- if (scope()->num_heap_slots() > 0) {
- UNIMPLEMENTED_MIPS();
- }
-
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- UNIMPLEMENTED_MIPS();
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope()->arguments() != NULL) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Bootstrapper::IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- UNIMPLEMENTED_MIPS();
- }
-#endif
- VisitStatementsAndSpill(info->function()->body());
- }
- }
-
- if (has_valid_frame() || function_return_.is_linked()) {
- if (!function_return_.is_linked()) {
- CodeForReturnPosition(info->function());
- }
- // Registers:
- // v0: result
- // sp: stack pointer
- // fp: frame pointer
- // cp: callee's context
-
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-
- function_return_.Bind();
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-
- masm_->mov(sp, fp);
- masm_->lw(fp, MemOperand(sp, 0));
- masm_->lw(ra, MemOperand(sp, 4));
- masm_->addiu(sp, sp, 8);
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
- masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
- masm_->Jump(ra);
- // The Jump automatically generates a nop in the branch delay slot.
-
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
- }
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- UNIMPLEMENTED_MIPS();
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- int size = ref->size();
- if (size > 0) {
- frame_->EmitPop(a0);
- frame_->Drop(size);
- frame_->EmitPush(a0);
- }
- ref->set_unloaded();
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(no_reg, 0);
- }
-}
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* x) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(x, &true_target, &false_target, false);
-
- if (has_cc()) {
- UNIMPLEMENTED_MIPS();
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope;
- __ lw(a0, GlobalObject());
- frame_->EmitPush(a0);
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope;
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- __ lw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->var()->mode() == Variable::CONST) {
- UNIMPLEMENTED_MIPS();
- }
- }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- UNIMPLEMENTED_MIPS();
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. a2 may be loaded with context; used below in
- // RecordWrite.
- frame_->EmitPop(a0);
- __ sw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->type() == Slot::CONTEXT) {
- UNIMPLEMENTED_MIPS();
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- VirtualFrame::SpilledScope spilled_scope;
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- VisitAndSpill(statements->at(i));
- }
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope;
- frame_->EmitPush(cp);
- __ li(t0, Operand(pairs));
- frame_->EmitPush(t0);
- __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- LoadAndSpill(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ReturnStatement");
-
- CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
- if (function_return_is_shadowed_) {
- frame_->EmitPop(v0);
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning thus making it easier to merge.
- frame_->EmitPop(v0);
- frame_->PrepareForReturn();
-
- function_return_.Jump();
- }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
-void CodeGenerator::VisitConditional(Conditional* node) {
- UNIMPLEMENTED_MIPS();
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
}
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
- ASSERT(frame_->height() == original_height + 1);
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
}
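
These two functions implement the stub flavor of the RuntimeCallHelper protocol: a caller brackets a call into the runtime with BeforeCall()/AfterCall() so the helper can set up and tear down the frame it needs. A self-contained sketch of that calling pattern follows, with a stand-in MacroAssembler exposing only the two methods used here; the real interfaces are richer.

    #include <cstdio>

    struct MacroAssembler {  // stand-in for the real class
      void EnterInternalFrame() { std::puts("enter internal frame"); }
      void LeaveInternalFrame() { std::puts("leave internal frame"); }
    };

    class RuntimeCallHelper {
     public:
      virtual ~RuntimeCallHelper() {}
      virtual void BeforeCall(MacroAssembler* masm) const = 0;
      virtual void AfterCall(MacroAssembler* masm) const = 0;
    };

    class StubRuntimeCallHelper : public RuntimeCallHelper {
     public:
      virtual void BeforeCall(MacroAssembler* masm) const {
        masm->EnterInternalFrame();
      }
      virtual void AfterCall(MacroAssembler* masm) const {
        masm->LeaveInternalFrame();
      }
    };

    int main() {
      MacroAssembler masm;
      StubRuntimeCallHelper helper;
      helper.BeforeCall(&masm);
      std::puts("... emit the runtime call here ...");
      helper.AfterCall(&masm);
      return 0;
    }
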
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValueAndSpill();
- }
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Literal");
- __ li(t0, Operand(node->handle()));
- frame_->EmitPush(t0);
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Assignment");
-
- { Reference target(this, node->target());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->EmitPush(zero_reg);
- ASSERT(frame_->height() == original_height + 1);
- return;
- }
-
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- LoadAndSpill(node->value());
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- (var->mode() == Variable::CONST) &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
- }
- }
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- int arg_count = args->length();
-
- // We need sp to be 8 bytes aligned when calling the stub.
- __ SetupAlignedCall(t0, arg_count);
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Setup the receiver register and call the IC initialization code.
- __ li(a2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ReturnFromAlignedCall();
- __ lw(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->EmitPush(v0);
-
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// This should generate code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
-
-
-Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
-}
-
-
-void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
-// positive or negative to indicate the result of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x765);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- UNIMPLEMENTED_MIPS();
- return Handle<Code>::null();
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x790);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x808);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x815);
-}
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- if (do_gc) {
- UNIMPLEMENTED_MIPS();
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Call C built-in.
- // a0 = argc, a1 = argv
- __ mov(a0, s0);
- __ mov(a1, s1);
-
- __ CallBuiltin(s2);
-
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Check for failure result.
- Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
-
- // Exit C frame and return.
- // v0:v1: result
- // sp: stack pointer
- // fp: frame pointer
- __ LeaveExitFrame(mode_);
-
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(eq, &retry, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(eq, throw_out_of_memory_exception,
- v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-
- // Retrieve the pending exception and clear the variable.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(a3, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ lw(v0, MemOperand(t0));
- __ sw(a3, MemOperand(t0));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ Branch(eq, throw_termination_exception,
- v0, Operand(Factory::termination_exception()));
-
- // Handle normal exception.
- __ b(throw_normal_exception);
- __ nop(); // Branch delay slot nop.
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // a0: number of arguments including receiver
- // a1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, s0, s1, s2);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-
- // Registers:
- // a0: entry address
- // a1: function
- // a2: reveiver
- // a3: argc
- //
- // Stack:
- // 4 args slots
- // args
-
- // Save callee saved registers on the stack.
- __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
-
- // We build an EntryFrame.
- __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ li(t2, Operand(Smi::FromInt(marker)));
- __ li(t1, Operand(Smi::FromInt(marker)));
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- __ lw(t0, MemOperand(t0));
- __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
-
- // Setup frame pointer for the frame to be pushed.
- __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Load argv in s0 register.
- __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
- StandardFrameConstants::kCArgsSlotsSize));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xff...f) |
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // Call a faked try-block that does the invoke.
- __ bal(&invoke);
- __ nop(); // Branch delay slot nop.
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(t1, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Top::k_pending_exception_address));
- __ sw(t1, MemOperand(t0));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ LoadExternalReference(t0, construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ LoadExternalReference(t0, entry);
- }
- __ lw(t9, MemOperand(t0)); // deref address
-
- // Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ CallBuiltin(t9);
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
- __ sw(t1, MemOperand(t0));
-
- // This restores sp to its position before PushTryHandler.
- __ addiu(sp, sp, StackHandlerConstants::kSize);
-
- __ bind(&exit); // v0 holds result
- // Restore the top frame descriptors from the stack.
- __ Pop(t1);
- __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- __ sw(t1, MemOperand(t0));
-
- // Reset the stack to the callee saved registers.
- __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Restore callee saved registers from the stack.
- __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
- // Return.
- __ Jump(ra);
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses a1 for the object, a0 for the function that it may
-// be an instance of (these are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x845);
-}
-
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x851);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x857);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x863);
-}
-
-
-const char* CompareStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return NULL; // UNIMPLEMENTED RETURN
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
- return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
-}
-
-
-#undef __
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 66f891bd7..fecd321fa 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,153 +29,25 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
+
+#include "ast.h"
+#include "code-stubs-mips.h"
+#include "ic-inl.h"
+
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to pop a reference, push the value of the reference,
- // and then spill the stack frame.
- inline void GetValueAndSpill();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
- CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- TypeofState typeof_state() const { return typeof_state_; }
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-
-// -----------------------------------------------------------------------------
+// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
- // Compilation mode. Either the compiler is used as the primary
- // compiler and needs to setup everything or the compiler is used as
- // the secondary compiler for split compilation and has to handle
- // bailouts.
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
- // Takes a function literal, generates code for it. This function should only
- // be called by compiler.cc.
- static Handle<Code> MakeCode(CompilationInfo* info);
+ static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@@ -185,6 +57,9 @@ class CodeGenerator: public AstVisitor {
Code::Flags flags,
CompilationInfo* info);
+ // Print the code after compiling it.
+ static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -194,234 +69,26 @@ class CodeGenerator: public AstVisitor {
bool is_toplevel,
Handle<Script> script);
- static void RecordPositions(MacroAssembler* masm, int pos);
-
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
- static const int kUnknownIntValue = -1;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 7;
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
-
- private:
- // Construction/Destruction.
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors.
- inline bool is_eval();
- inline Scope* scope();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- // State
- bool has_cc() const { return cc_reg_ != cc_always; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // We don't track loop nesting level on mips yet.
- int loop_nesting() const { return 0; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- inline void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand ContextOperand(Register context, int index) const {
- return MemOperand(context, Context::SlotOffset(index));
+ // Constants related to patching of inlined load/store.
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ // This value must be kept in sync with the padding in MacroAssembler::Abort.
+ return FLAG_debug_code ? 45 : 20;
}
- MemOperand SlotOperand(Slot* slot, Register tmp);
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
- // Expressions
- MemOperand GlobalObject() const {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ // Magic number 5: instruction count after patched map load:
+ // li: 2 (lui & ori), Branch: 2 (bne & nop), sw: 1.
+ return Isolate::Current()->inlined_write_barrier_size() + 5;
}
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* x);
- void LoadGlobal();
-
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
-
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateStringAdd(ZoneList<Expression*>* args);
- void GenerateSubString(ZoneList<Expression*>* args);
- void GenerateStringCompare(ZoneList<Expression*>* args);
- void GenerateRegExpExec(ZoneList<Expression*>* args);
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- bool is_eval_; // Tells whether code is generated for eval.
-
- Handle<Script> script_;
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
-
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
- friend class VirtualFrame;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
-
+ private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 49502bdec..96a23338d 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,14 +31,12 @@
#include "constants-mips.h"
-namespace assembler {
-namespace mips {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
-// Registers
+// Registers.
// These register names are defined in a way to match the native disassembler
@@ -102,20 +100,20 @@ int Registers::Number(const char* name) {
}
-const char* FPURegister::names_[kNumFPURegister] = {
+const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS registers.
-const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
-const char* FPURegister::Name(int creg) {
+const char* FPURegisters::Name(int creg) {
const char* result;
- if ((0 <= creg) && (creg < kNumFPURegister)) {
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
@@ -124,9 +122,9 @@ const char* FPURegister::Name(int creg) {
}
-int FPURegister::Number(const char* name) {
+int FPURegisters::Number(const char* name) {
// Look through the canonical names.
- for (int i = 0; i < kNumSimuRegisters; i++) {
+ for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@@ -147,10 +145,10 @@ int FPURegister::Number(const char* name) {
// -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsForbiddenInBranchDelay() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
@@ -189,13 +187,18 @@ bool Instruction::IsForbiddenInBranchDelay() {
}
-bool Instruction::IsLinkingInstruction() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsLinkingInstruction() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case JAL:
- case BGEZAL:
- case BLTZAL:
- return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ };
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
@@ -209,7 +212,7 @@ bool Instruction::IsLinkingInstruction() {
}
-bool Instruction::IsTrap() {
+bool Instruction::IsTrap() const {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
@@ -264,6 +267,9 @@ Instruction::Type Instruction::InstructionType() const {
case TLTU:
case TEQ:
case TNE:
+ case MOVZ:
+ case MOVN:
+ case MOVCI:
return kRegisterType;
default:
UNREACHABLE();
@@ -272,20 +278,30 @@ Instruction::Type Instruction::InstructionType() const {
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
+ case CLZ:
return kRegisterType;
default:
UNREACHABLE();
};
break;
- case COP1: // Coprocessor instructions
+ case SPECIAL3:
switch (FunctionFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case INS:
+ case EXT:
+ return kRegisterType;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case COP1: // Coprocessor instructions.
+ switch (RsFieldRawNoAssert()) {
+ case BC1: // Branch on coprocessor condition.
return kImmediateType;
default:
return kRegisterType;
};
break;
- // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+ // 16-bit immediate type instructions, e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
case BNE:
@@ -304,16 +320,23 @@ Instruction::Type Instruction::InstructionType() const {
case BLEZL:
case BGTZL:
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
case SB:
+ case SH:
+ case SWL:
case SW:
+ case SWR:
case LWC1:
case LDC1:
case SWC1:
case SDC1:
return kImmediateType;
- // 26 bits immediate type instructions. eg: j imm26
+ // 26-bit immediate type instructions, e.g.: j imm26.
case J:
case JAL:
return kJumpType;
@@ -323,6 +346,7 @@ Instruction::Type Instruction::InstructionType() const {
return kUnsupported;
}
-} } // namespace assembler::mips
+
+} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index d0fdf88db..6bf2570eb 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,15 +28,38 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
-#include "checks.h"
-
// UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+#ifdef _MIPS_ARCH_MIPS32R2
+ #define mips32r2 1
+#else
+ #define mips32r2 0
+#endif
+
+
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
+
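
A minimal sketch (not from the patch itself) of how a flag like IsMipsSoftFloatABI is typically consulted; the helper name below is hypothetical, while IsMipsSoftFloatABI and CpuFeatures::IsSupported(FPU) come from the surrounding sources.

bool CanPassDoublesInFPURegisters() {
  // Doubles may cross a C call boundary in FPU registers only when the
  // hard-float ABI is in use and an FPU is actually available.
  return !IsMipsSoftFloatABI && CpuFeatures::IsSupported(FPU);
}
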
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@@ -44,11 +67,11 @@
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
-namespace assembler {
-namespace mips {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
// Number of general purpose registers.
static const int kNumRegisters = 32;
@@ -61,9 +84,37 @@ static const int kNumSimuRegisters = 35;
static const int kPCRegister = 34;
// Number coprocessor registers.
-static const int kNumFPURegister = 32;
+static const int kNumFPURegisters = 32;
static const int kInvalidFPURegister = -1;
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+static const int kFCSRRegister = 31;
+static const int kInvalidFPUControlRegister = -1;
+static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+
+// FCSR constants.
+static const uint32_t kFCSRInexactFlagBit = 2;
+static const uint32_t kFCSRUnderflowFlagBit = 3;
+static const uint32_t kFCSROverflowFlagBit = 4;
+static const uint32_t kFCSRDivideByZeroFlagBit = 5;
+static const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+static const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+static const uint32_t kFCSRExceptionFlagMask =
+ kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
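
A small usage sketch (not part of the patch) for the FCSR masks declared above; the helper names are hypothetical and 'fcsr' is assumed to have been read with cfc1 or by the simulator.

static inline bool FPURaisedException(uint32_t fcsr) {
  // kFCSRExceptionFlagMask deliberately leaves out the inexact flag.
  return (fcsr & kFCSRExceptionFlagMask) != 0;
}

static inline bool FPUDividedByZero(uint32_t fcsr) {
  return (fcsr & kFCSRDivideByZeroFlagMask) != 0;
}
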
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -82,13 +133,12 @@ class Registers {
static const int32_t kMinValue = 0x80000000;
private:
-
static const char* names_[kNumSimuRegisters];
static const RegisterAlias aliases_[];
};
// Helper functions for converting between register numbers and names.
-class FPURegister {
+class FPURegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
@@ -102,8 +152,7 @@ class FPURegister {
};
private:
-
- static const char* names_[kNumFPURegister];
+ static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
@@ -114,8 +163,6 @@ class FPURegister {
// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
-typedef unsigned char byte_;
-
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
@@ -123,6 +170,18 @@ enum SoftwareInterruptCodes {
call_rt_redirected = 0xfffff
};
+// On the MIPS simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+static const uint32_t kMaxWatchpointCode = 31;
+static const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+
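
An illustrative mapping of a break code onto the three ranges documented above (the helper is hypothetical, not part of the patch):

static inline const char* ClassifyBreakCode(uint32_t code) {
  if (code <= kMaxWatchpointCode) return "watchpoint";  // simulator prints registers and continues
  if (code <= kMaxStopCode) return "stop";              // emitted by Assembler::stop()
  return "break";                                       // drops into the debugger
}
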
// ----- Fields offset and length.
static const int kOpcodeShift = 26;
static const int kOpcodeBits = 6;
@@ -136,22 +195,34 @@ static const int kSaShift = 6;
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
+static const int kLuiShift = 16;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits = 28;
static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
-
-// ----- Miscellianous useful masks.
+static const int kFdShift = 6;
+static const int kFdBits = 5;
+static const int kFCccShift = 8;
+static const int kFCccBits = 3;
+static const int kFBccShift = 18;
+static const int kFBccBits = 3;
+static const int kFBtrueShift = 16;
+static const int kFBtrueBits = 1;
+
+// ----- Miscellaneous useful masks.
// Instruction bit masks.
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
@@ -159,9 +230,9 @@ static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
static const int kFunctionFieldMask =
((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-static const int HIMask = 0xffff << 16;
-static const int LOMask = 0xffff;
-static const int signMask = 0x80000000;
+static const int kHiMask = 0xffff << 16;
+static const int kLoMask = 0xffff;
+static const int kSignMask = 0x80000000;
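
A sketch (not part of the patch) of how the shift and mask constants combine to pull fields out of a raw instruction word; the real accessors live on the Instruction class below, so these free functions are only illustrative.

static inline int RtFieldOf(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}

static inline int32_t SignedImm16Of(Instr instr) {
  // Shift up and back down to sign-extend the low 16 bits.
  return (static_cast<int32_t>(instr & kImm16Mask) << 16) >> 16;
}
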
// ----- MIPS Opcodes and Function Fields.
@@ -187,19 +258,27 @@ enum Opcode {
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
@@ -216,9 +295,12 @@ enum SecondaryField {
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
+ MOVCI = ((0 << 3) + 1),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
@@ -250,6 +332,12 @@ enum SecondaryField {
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
+
+ // SPECIAL3 Encoding of Function Field.
+ EXT = ((0 << 3) + 0),
+ INS = ((0 << 3) + 4),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@@ -259,8 +347,10 @@ enum SecondaryField {
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
@@ -269,14 +359,46 @@ enum SecondaryField {
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@@ -293,7 +415,7 @@ enum SecondaryField {
// the 'U' prefix is used to specify unsigned comparisons.
enum Condition {
// Any value < 0 is considered no_condition.
- no_condition = -1,
+ kNoCondition = -1,
overflow = 0,
no_overflow = 1,
@@ -314,32 +436,119 @@ enum Condition {
cc_always = 16,
- // aliases
+ // Aliases.
carry = Uless,
not_carry = Ugreater_equal,
zero = equal,
eq = equal,
not_zero = not_equal,
ne = not_equal,
+ nz = not_equal,
sign = negative,
not_sign = positive,
-
- cc_default = no_condition
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+
+ cc_default = kNoCondition
};
+
+// Returns the equivalent of !cc.
+// Negating the default kNoCondition (-1) yields a different negative value
+// (-2). As long as tests for kNoCondition check for condition < 0, this
+// works as expected.
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case Uless:
+ return Ugreater;
+ case Ugreater:
+ return Uless;
+ case Ugreater_equal:
+ return Uless_equal;
+ case Uless_equal:
+ return Ugreater_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ };
+}
+
+
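
A short illustration (not in the patch) of the two helpers above: complementary conditions sit on adjacent even/odd enum values, so XOR with 1 flips a condition into its negation, while ReverseCondition instead swaps the operand order of a comparison.

void ConditionHelpersExample() {
  ASSERT(NegateCondition(eq) == ne);                 // !(a == b)  ->  a != b
  ASSERT(NegateCondition(Uless) == Ugreater_equal);  // unsigned complement pair
  ASSERT(ReverseCondition(less) == greater);         // a < b  <=>  b > a
}
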
// ----- Coprocessor conditions.
enum FPUCondition {
- F, // False
- UN, // Unordered
- EQ, // Equal
- UEQ, // Unordered or Equal
- OLT, // Ordered or Less Than
- ULT, // Unordered or Less Than
- OLE, // Ordered or Less Than or Equal
- ULE // Unordered or Less Than or Equal
+ F, // False.
+ UN, // Unordered.
+ EQ, // Equal.
+ UEQ, // Unordered or Equal.
+ OLT, // Ordered or Less Than.
+ ULT, // Unordered or Less Than.
+ OLE, // Ordered or Less Than or Equal.
+ ULE // Unordered or Less Than or Equal.
};
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS. They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+ no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4): a Pop() operation, or the part of a Pop(r)
+// operation that post-increments sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
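
One plausible way the patterns and masks above are meant to be matched, shown as a sketch; the exact comparisons live in assembler-mips.cc and may differ, and IsPushOfRegister is a hypothetical name.

static inline bool IsPushOfRegister(Instr instr) {
  // Ignore which register is being pushed and compare the remaining bits
  // against the sw(r, MemOperand(sp, 0)) pattern.
  return (instr & ~kRtMask) == (kPushRegPattern & ~kRtMask);
}
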
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
@@ -348,10 +557,10 @@ const Instr nopInstr = 0;
class Instruction {
public:
enum {
- kInstructionSize = 4,
- kInstructionSizeLog2 = 2,
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
- // always the value of the current instruction being exectued.
+ // always the value of the current instruction being executed.
kPCReadOffset = 0
};
@@ -388,45 +597,64 @@ class Instruction {
// Accessors for the different named fields used in the MIPS encoding.
- inline Opcode OpcodeField() const {
+ inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
- inline int RsField() const {
+ inline int RsValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
- inline int RtField() const {
+ inline int RtValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
- inline int RdField() const {
+ inline int RdValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
- inline int SaField() const {
+ inline int SaValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
- inline int FunctionField() const {
+ inline int FunctionValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
- inline int FsField() const {
- return Bits(kFsShift + kRsBits - 1, kFsShift);
+ inline int FdValue() const {
+ return Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FsValue() const {
+ return Bits(kFsShift + kFsBits - 1, kFsShift);
+ }
+
+ inline int FtValue() const {
+ return Bits(kFtShift + kFtBits - 1, kFtShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int FCccValue() const {
+ return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int FBccValue() const {
+ return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
}
- inline int FtField() const {
- return Bits(kFtShift + kRsBits - 1, kFtShift);
+ // Float Branch true/false instruction bit.
+ inline int FBtrueValue() const {
+ return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
@@ -440,6 +668,11 @@ class Instruction {
return InstructionBits() & kRsFieldMask;
}
+ // Same as above function, but safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -461,43 +694,43 @@ class Instruction {
}
// Get the secondary field according to the opcode.
- inline int SecondaryField() const {
+ inline int SecondaryValue() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
- return FunctionField();
+ return FunctionValue();
case COP1:
- return RsField();
+ return RsValue();
case REGIMM:
- return RtField();
+ return RtValue();
default:
return NULLSF;
}
}
- inline int32_t Imm16Field() const {
+ inline int32_t Imm16Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
- inline int32_t Imm26Field() const {
+ inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay();
+ bool IsForbiddenInBranchDelay() const;
// Say if the instruction 'links'. eg: jal, bal.
- bool IsLinkingInstruction();
+ bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
- bool IsTrap();
+ bool IsTrap() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte_* pc) {
+ static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
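
A usage sketch for At() (illustrative only; 'pc' is assumed to point into a code stream and the helper name is hypothetical):

void DumpIfTrap(byte* pc) {
  Instruction* instr = Instruction::At(pc);  // just a cast, no allocation
  if (instr->IsTrap()) {
    PrintF("break/trap instruction at %p\n", static_cast<void*>(pc));
  }
}
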
@@ -510,16 +743,23 @@ class Instruction {
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
-static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
+
+static const int kArgsSlotsSize = 4 * Instruction::kInstrSize;
static const int kArgsSlotsNum = 4;
+// C/C++ argument slots size.
+static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
+// JS argument slots size.
+static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+// Assembly builtins argument slots size.
+static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-static const int kDoubleAlignment = 2 * 8;
-static const int kDoubleAlignmentMask = kDoubleAlignmentMask - 1;
+static const int kDoubleAlignmentBits = 3;
+static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
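
With the corrected mask above (kDoubleAlignment - 1 == 7), rounding an address up to a double boundary is the usual add-and-mask; the helper below is a sketch with a hypothetical name.

static inline uint32_t AlignToDoubleBoundary(uint32_t addr) {
  return (addr + kDoubleAlignmentMask) & ~static_cast<uint32_t>(kDoubleAlignmentMask);
}
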
-} } // namespace assembler::mips
+} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
-
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index 659fc01ce..26e95fb24 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,26 +39,48 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "cpu.h"
+#include "macro-assembler.h"
+
+#include "simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
+
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(FPU);
}
+
void CPU::FlushICache(void* start, size_t size) {
-#ifdef __mips
+ // Nothing to do, flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
+#if !defined (USE_SIMULATOR)
int res;
- // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
-#endif // #ifdef __mips
+#else // USE_SIMULATOR.
+ // Not generating mips instructions for C-code. This means that we are
+ // building a mips emulator based target. We should notify the simulator
+ // that the Icache was flushed.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif // USE_SIMULATOR.
}
@@ -68,6 +90,7 @@ void CPU::DebugBreak() {
#endif // #ifdef __mips
}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index b8ae68e39..e323c505e 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,78 +31,258 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+ // Mips return sequence:
+ // mov sp, fp
+ // lw fp, sp(0)
+ // lw ra, sp(4)
+ // addiu sp, sp, 8
+ // addiu sp, sp, N
+ // jr ra
+ // nop (in branch delay slot)
+
+ // Make sure this constant matches the number of instructions we emit.
+ ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+ CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ // li and Call pseudo-instructions emit two instructions each.
+ patcher.masm()->li(v8::internal::t9,
+ Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_return()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+ patcher.masm()->nop();
+
+ // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+ // patcher.masm()->bkpt(0);
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
- UNIMPLEMENTED_MIPS();
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kJSReturnSequenceInstructions);
}
-// A debug break in the exit code is identified by a call.
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with a li/call pseudo-instruction pair (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Check whether the debug break slot instructions have been patched.
+ return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ // Patch the code changing the debug break slot code from:
+ // nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // nop(DEBUG_BREAK_NOP)
+ // to a call to the debug break slot code.
+ // li t9, address (lui t9 / ori t9 instruction pair)
+ // call t9 (jalr t9 / nop instruction pair)
+ CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+ Isolate::Current()->debug()->debug_break_slot()->entry())));
+ patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ ASSERT(IsDebugBreakSlot());
+ rinfo()->PatchCode(original_rinfo()->pc(),
+ Assembler::kDebugBreakSlotInstructions);
+}
+
+
#define __ ACCESS_MASM(masm)
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList object_regs,
+ RegList non_object_regs) {
+ __ EnterInternalFrame();
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that they are correctly updated during GC. Non-object values
+ // are stored as smis so that the GC leaves them untouched.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ }
+ __ sll(reg, reg, kSmiTagSize);
+ }
+ }
+ __ MultiPush(object_regs | non_object_regs);
+ }
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
+ }
+ }
+
+ __ LeaveInternalFrame();
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller, which was
+ // overwritten by the address of DebugBreakXXX.
+ __ li(t9, Operand(
+ ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+ __ lw(t9, MemOperand(t9));
+ __ Jump(t9);
+}
+
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC load (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- [sp] : receiver
+ // -----------------------------------
+ // Registers a0 and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC store (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ // Registers a0, a1, and a2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for IC call (from ic-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a2: name
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // Calling convention for construct call (from builtins-mips.cc).
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // In places other than IC call sites it is expected that v0 holds TOS and
+ // that it is an object. This is not generally the case, so this helper
+ // should be used with care.
+ Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
}
void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // No registers used on entry.
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ // Generate enough nops to make space for a call instruction. Avoid emitting
+ // the trampoline pool in the debug break slot code.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+ Label check_codesize;
+ __ bind(&check_codesize);
+ __ RecordDebugBreakSlot();
+ for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+ __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+ }
+ ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+ masm->InstructionsGeneratedSince(&check_codesize));
+}
+
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ // In the places where a debug break slot is inserted no registers can contain
+ // object pointers.
+ Generate_DebugBreakCallHelper(masm, 0, 0);
}
@@ -110,6 +290,7 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
+
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
diff --git a/deps/v8/src/mips/fast-codegen-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 186f9fadb..9a19aba75 100644
--- a/deps/v8/src/mips/fast-codegen-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,51 +27,70 @@
#include "v8.h"
-#if defined(V8_TARGET_ARCH_MIPS)
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
+int Deoptimizer::table_entry_size_ = 10;
-void FastCodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
+int Deoptimizer::patch_size() {
+ const int kCallInstructionSizeInWords = 3;
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ UNIMPLEMENTED();
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
- UNIMPLEMENTED_MIPS();
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ UNIMPLEMENTED();
}
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ UNIMPLEMENTED();
}
-void FastCodeGenerator::EmitBitOr() {
- UNIMPLEMENTED_MIPS();
+void Deoptimizer::DoComputeOsrOutputFrame() {
+ UNIMPLEMENTED();
}
-#undef __
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+ int frame_index) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UNIMPLEMENTED();
+}
-} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_MIPS
+} } // namespace v8::internal
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 959a4a220..7df5c4175 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,11 +33,10 @@
//
// NameConverter converter;
// Disassembler d(converter);
-// for (byte_* pc = begin; pc < end;) {
-// char buffer[128];
-// buffer[0] = '\0';
-// byte_* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+// for (byte* pc = begin; pc < end;) {
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
@@ -59,17 +58,13 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "constants-mips.h"
+#include "mips/constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
-namespace assembler {
-namespace mips {
-
-
-namespace v8i = v8::internal;
-
+namespace v8 {
+namespace internal {
//------------------------------------------------------------------------------
@@ -90,7 +85,7 @@ class Decoder {
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte_* instruction);
+ int InstructionDecode(byte* instruction);
private:
// Bottleneck functions to print into the out_buffer.
@@ -99,7 +94,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
- void PrintCRegister(int creg);
+ void PrintFPURegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -107,6 +102,11 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintSd(Instruction* instr);
+ void PrintSs1(Instruction* instr);
+ void PrintSs2(Instruction* instr);
+ void PrintBc(Instruction* instr);
+ void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
@@ -119,7 +119,7 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
- int FormatCRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -166,84 +166,116 @@ void Decoder::PrintRegister(int reg) {
void Decoder::PrintRs(Instruction* instr) {
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
}
void Decoder::PrintRt(Instruction* instr) {
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
}
void Decoder::PrintRd(Instruction* instr) {
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
}
-// Print the Cregister name according to the active name converter.
-void Decoder::PrintCRegister(int creg) {
- Print(converter_.NameOfXMMRegister(creg));
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
}
void Decoder::PrintFs(Instruction* instr) {
- int creg = instr->RsField();
- PrintCRegister(creg);
+ int freg = instr->RsValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFt(Instruction* instr) {
- int creg = instr->RtField();
- PrintCRegister(creg);
+ int freg = instr->RtValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFd(Instruction* instr) {
- int creg = instr->RdField();
- PrintCRegister(creg);
+ int freg = instr->RdValue();
+ PrintFPURegister(freg);
}
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
- int sa = instr->SaField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", sa);
+ int sa = instr->SaValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field, when it is not used as reg.
+void Decoder::PrintSd(Instruction* instr) {
+ int sd = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+ int ss = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+ int ss = instr->RdValue();
+ int pos = instr->SaValue();
+ out_buffer_pos_ +=
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+ int cc = instr->FBccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+ int cc = instr->FCccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%u", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Field())<<16)>>16;
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%x", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
- int32_t imm = instr->Imm26Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = instr->Imm26Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
@@ -254,8 +286,8 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
break;
}
case TGE:
@@ -266,7 +298,7 @@ void Decoder::PrintCode(Instruction* instr) {
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
@@ -284,16 +316,16 @@ void Decoder::PrintInstructionName(Instruction* instr) {
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
- if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsField();
+ if (format[1] == 's') { // 'rs: Rs register.
+ int reg = instr->RsValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 't') { // 'rt: rt register
- int reg = instr->RtField();
+ } else if (format[1] == 't') { // 'rt: rt register.
+ int reg = instr->RtValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'rd: rd register
- int reg = instr->RdField();
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
PrintRegister(reg);
return 2;
}
@@ -302,21 +334,21 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
}
-// Handle all Cregister based formatting in this function to reduce the
+// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register
- int reg = instr->RsField();
- PrintCRegister(reg);
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
return 2;
- } else if (format[1] == 't') { // 'ft: ft register
- int reg = instr->RtField();
- PrintCRegister(reg);
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'fd: fd register
- int reg = instr->RdField();
- PrintCRegister(reg);
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
return 2;
}
UNREACHABLE();
@@ -331,12 +363,12 @@ int Decoder::FormatCRegister(Instruction* instr, const char* format) {
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
- case 'c': { // 'code for break or trap instructions
+ case 'c': { // 'code for break or trap instructions.
ASSERT(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
- case 'i': { // 'imm16u or 'imm26
+ case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
ASSERT(STRING_STARTS_WITH(format, "imm16"));
if (format[5] == 's') {
@@ -356,15 +388,45 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 5;
}
}
- case 'r': { // 'r: registers
+ case 'r': { // 'r: registers.
return FormatRegister(instr, format);
}
- case 'f': { // 'f: Cregisters
- return FormatCRegister(instr, format);
+ case 'f': { // 'f: FPUregisters.
+ return FormatFPURegister(instr, format);
}
- case 's': { // 'sa
- ASSERT(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
+ case 's': { // 'sa.
+ switch (format[1]) {
+ case 'a': {
+ ASSERT(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ case 'd': {
+ ASSERT(STRING_STARTS_WITH(format, "sd"));
+ PrintSd(instr);
+ return 2;
+ }
+ case 's': {
+ if (format[2] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */
+ PrintSs1(instr);
+ return 3;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */
+ PrintSs2(instr);
+ return 3;
+ }
+ }
+ }
+ }
+ case 'b': { // 'bc - Special for bc1 cc field.
+ ASSERT(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
+ ASSERT(STRING_STARTS_WITH(format, "Cc"));
+ PrintCc(instr);
return 2;
}
};
@@ -399,256 +461,460 @@ void Decoder::Unknown(Instruction* instr) {
void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
+ Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
- Format(instr, "mfhc1 rt, 'fs");
+ Format(instr, "mfhc1 'rt, 'fs");
break;
case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
+ Format(instr, "mtc1 'rt, 'fs");
+ break;
+ // These are called "fs" too, although they are not FPU registers.
+ case CTC1:
+ Format(instr, "ctc1 'rt, 'fs");
+ break;
+ case CFC1:
+ Format(instr, "cfc1 'rt, 'fs");
break;
case MTHC1:
- Format(instr, "mthc1 rt, 'fs");
+ Format(instr, "mthc1 'rt, 'fs");
break;
- case S:
case D:
+ switch (instr->FunctionFieldRaw()) {
+ case ADD_D:
+ Format(instr, "add.d 'fd, 'fs, 'ft");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'fd, 'fs, 'ft");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'fd, 'fs, 'ft");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'fd, 'fs, 'ft");
+ break;
+ case ABS_D:
+ Format(instr, "abs.d 'fd, 'fs");
+ break;
+ case MOV_D:
+ Format(instr, "mov.d 'fd, 'fs");
+ break;
+ case NEG_D:
+ Format(instr, "neg.d 'fd, 'fs");
+ break;
+ case SQRT_D:
+ Format(instr, "sqrt.d 'fd, 'fs");
+ break;
+ case CVT_W_D:
+ Format(instr, "cvt.w.d 'fd, 'fs");
+ break;
+ case CVT_L_D: {
+ if (mips32r2) {
+ Format(instr, "cvt.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case TRUNC_W_D:
+ Format(instr, "trunc.w.d 'fd, 'fs");
+ break;
+ case TRUNC_L_D: {
+ if (mips32r2) {
+ Format(instr, "trunc.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case ROUND_W_D:
+ Format(instr, "round.w.d 'fd, 'fs");
+ break;
+ case FLOOR_W_D:
+ Format(instr, "floor.w.d 'fd, 'fs");
+ break;
+ case CEIL_W_D:
+ Format(instr, "ceil.w.d 'fd, 'fs");
+ break;
+ case CVT_S_D:
+ Format(instr, "cvt.s.d 'fd, 'fs");
+ break;
+ case C_F_D:
+ Format(instr, "c.f.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UN_D:
+ Format(instr, "c.un.d 'fs, 'ft, 'Cc");
+ break;
+ case C_EQ_D:
+ Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UEQ_D:
+ Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLT_D:
+ Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULT_D:
+ Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLE_D:
+ Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULE_D:
+ Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+ break;
+ default:
+ Format(instr, "unknown.cop1.d");
+ break;
+ }
+ break;
+ case S:
UNIMPLEMENTED_MIPS();
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ Format(instr, "cvt.s.w 'fd, 'fs");
break;
case CVT_D_W: // Convert word to double.
- Format(instr, "cvt.d.w 'fd, 'fs");
+ Format(instr, "cvt.d.w 'fd, 'fs");
break;
default:
UNREACHABLE();
- };
+ }
break;
case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.d.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case CVT_S_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
case PS:
UNIMPLEMENTED_MIPS();
break;
- break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
- Format(instr, "jr 'rs");
+ Format(instr, "jr 'rs");
break;
case JALR:
- Format(instr, "jalr 'rs");
+ Format(instr, "jalr 'rs");
break;
case SLL:
if ( 0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop");
else
- Format(instr, "sll 'rd, 'rt, 'sa");
+ Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
- Format(instr, "srl 'rd, 'rt, 'sa");
+ if (instr->RsValue() == 0) {
+ Format(instr, "srl 'rd, 'rt, 'sa");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotr 'rd, 'rt, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRA:
- Format(instr, "sra 'rd, 'rt, 'sa");
+ Format(instr, "sra 'rd, 'rt, 'sa");
break;
case SLLV:
- Format(instr, "sllv 'rd, 'rt, 'rs");
+ Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
- Format(instr, "srlv 'rd, 'rt, 'rs");
+ if (instr->SaValue() == 0) {
+ Format(instr, "srlv 'rd, 'rt, 'rs");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRAV:
- Format(instr, "srav 'rd, 'rt, 'rs");
+ Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ Format(instr, "mfhi 'rd");
break;
case MFLO:
- Format(instr, "mflo 'rd");
+ Format(instr, "mflo 'rd");
break;
case MULT:
- Format(instr, "mult 'rs, 'rt");
+ Format(instr, "mult 'rs, 'rt");
break;
case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ Format(instr, "multu 'rs, 'rt");
break;
case DIV:
- Format(instr, "div 'rs, 'rt");
+ Format(instr, "div 'rs, 'rt");
break;
case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ Format(instr, "divu 'rs, 'rt");
break;
case ADD:
- Format(instr, "add 'rd, 'rs, 'rt");
+ Format(instr, "add 'rd, 'rs, 'rt");
break;
case ADDU:
- Format(instr, "addu 'rd, 'rs, 'rt");
+ Format(instr, "addu 'rd, 'rs, 'rt");
break;
case SUB:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "sub 'rd, 'rs, 'rt");
break;
case SUBU:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "subu 'rd, 'rs, 'rt");
break;
case AND:
- Format(instr, "and 'rd, 'rs, 'rt");
+ Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
- if (0 == instr->RsField()) {
- Format(instr, "mov 'rd, 'rt");
- } else if (0 == instr->RtField()) {
- Format(instr, "mov 'rd, 'rs");
+ if (0 == instr->RsValue()) {
+ Format(instr, "mov 'rd, 'rt");
+ } else if (0 == instr->RtValue()) {
+ Format(instr, "mov 'rd, 'rs");
} else {
- Format(instr, "or 'rd, 'rs, 'rt");
+ Format(instr, "or 'rd, 'rs, 'rt");
}
break;
case XOR:
- Format(instr, "xor 'rd, 'rs, 'rt");
+ Format(instr, "xor 'rd, 'rs, 'rt");
break;
case NOR:
- Format(instr, "nor 'rd, 'rs, 'rt");
+ Format(instr, "nor 'rd, 'rs, 'rt");
break;
case SLT:
- Format(instr, "slt 'rd, 'rs, 'rt");
+ Format(instr, "slt 'rd, 'rs, 'rt");
break;
case SLTU:
- Format(instr, "sltu 'rd, 'rs, 'rt");
+ Format(instr, "sltu 'rd, 'rs, 'rt");
break;
case BREAK:
Format(instr, "break, code: 'code");
break;
case TGE:
- Format(instr, "tge 'rs, 'rt, code: 'code");
+ Format(instr, "tge 'rs, 'rt, code: 'code");
break;
case TGEU:
- Format(instr, "tgeu 'rs, 'rt, code: 'code");
+ Format(instr, "tgeu 'rs, 'rt, code: 'code");
break;
case TLT:
- Format(instr, "tlt 'rs, 'rt, code: 'code");
+ Format(instr, "tlt 'rs, 'rt, code: 'code");
break;
case TLTU:
- Format(instr, "tltu 'rs, 'rt, code: 'code");
+ Format(instr, "tltu 'rs, 'rt, code: 'code");
break;
case TEQ:
- Format(instr, "teq 'rs, 'rt, code: 'code");
+ Format(instr, "teq 'rs, 'rt, code: 'code");
break;
case TNE:
- Format(instr, "tne 'rs, 'rt, code: 'code");
+ Format(instr, "tne 'rs, 'rt, code: 'code");
+ break;
+ case MOVZ:
+ Format(instr, "movz 'rd, 'rs, 'rt");
+ break;
+ case MOVN:
+ Format(instr, "movn 'rd, 'rs, 'rt");
+ break;
+ case MOVCI:
+ if (instr->Bit(16)) {
+ Format(instr, "movt 'rd, 'rs, 'bc");
+ } else {
+ Format(instr, "movf 'rd, 'rs, 'bc");
+ }
break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ break;
+ case CLZ:
+ Format(instr, "clz 'rd, 'rs");
break;
default:
UNREACHABLE();
- };
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: {
+ if (mips32r2) {
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case EXT: {
+ if (mips32r2) {
+ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
break;
default:
UNREACHABLE();
- };
+ }
}
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1:
+ if (instr->FBtrueValue()) {
+ Format(instr, "bc1t 'bc, 'imm16u");
+ } else {
+ Format(instr, "bc1f 'bc, 'imm16u");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
+ Format(instr, "bltz 'rs, 'imm16u");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
+ Format(instr, "bltzal 'rs, 'imm16u");
break;
case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
+ Format(instr, "bgez 'rs, 'imm16u");
break;
case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
+ Format(instr, "bgezal 'rs, 'imm16u");
break;
default:
UNREACHABLE();
- };
- break; // case REGIMM
+ }
+ break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
+ Format(instr, "beq 'rs, 'rt, 'imm16u");
break;
case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
+ Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ Format(instr, "blez 'rs, 'imm16u");
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ Format(instr, "bgtz 'rs, 'imm16u");
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
break;
case ADDIU:
- Format(instr, "addiu 'rt, 'rs, 'imm16s");
+ Format(instr, "addiu 'rt, 'rs, 'imm16s");
break;
case SLTI:
- Format(instr, "slti 'rt, 'rs, 'imm16s");
+ Format(instr, "slti 'rt, 'rs, 'imm16s");
break;
case SLTIU:
- Format(instr, "sltiu 'rt, 'rs, 'imm16u");
+ Format(instr, "sltiu 'rt, 'rs, 'imm16u");
break;
case ANDI:
- Format(instr, "andi 'rt, 'rs, 'imm16x");
+ Format(instr, "andi 'rt, 'rs, 'imm16x");
break;
case ORI:
- Format(instr, "ori 'rt, 'rs, 'imm16x");
+ Format(instr, "ori 'rt, 'rs, 'imm16x");
break;
case XORI:
- Format(instr, "xori 'rt, 'rs, 'imm16x");
+ Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ Format(instr, "lui 'rt, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
- Format(instr, "lb 'rt, 'imm16s('rs)");
+ Format(instr, "lb 'rt, 'imm16s('rs)");
+ break;
+ case LH:
+ Format(instr, "lh 'rt, 'imm16s('rs)");
+ break;
+ case LWL:
+ Format(instr, "lwl 'rt, 'imm16s('rs)");
break;
case LW:
- Format(instr, "lw 'rt, 'imm16s('rs)");
+ Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
- Format(instr, "lbu 'rt, 'imm16s('rs)");
+ Format(instr, "lbu 'rt, 'imm16s('rs)");
+ break;
+ case LHU:
+ Format(instr, "lhu 'rt, 'imm16s('rs)");
+ break;
+ case LWR:
+ Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
case SB:
- Format(instr, "sb 'rt, 'imm16s('rs)");
+ Format(instr, "sb 'rt, 'imm16s('rs)");
+ break;
+ case SH:
+ Format(instr, "sh 'rt, 'imm16s('rs)");
+ break;
+ case SWL:
+ Format(instr, "swl 'rt, 'imm16s('rs)");
break;
case SW:
- Format(instr, "sw 'rt, 'imm16s('rs)");
+ Format(instr, "sw 'rt, 'imm16s('rs)");
+ break;
+ case SWR:
+ Format(instr, "swr 'rt, 'imm16s('rs)");
break;
case LWC1:
- Format(instr, "lwc1 'ft, 'imm16s('rs)");
+ Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
case LDC1:
- Format(instr, "ldc1 'ft, 'imm16s('rs)");
+ Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
- Format(instr, "swc1 'rt, 'imm16s('fs)");
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
break;
case SDC1:
- Format(instr, "sdc1 'rt, 'imm16s('fs)");
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
UNREACHABLE();
@@ -660,10 +926,10 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
- Format(instr, "j 'imm26");
+ Format(instr, "j 'imm26");
break;
case JAL:
- Format(instr, "jal 'imm26");
+ Format(instr, "jal 'imm26");
break;
default:
UNREACHABLE();
@@ -672,10 +938,10 @@ void Decoder::DecodeTypeJump(Instruction* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
@@ -695,11 +961,11 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
UNSUPPORTED_MIPS();
}
}
- return Instruction::kInstructionSize;
+ return Instruction::kInstrSize;
}
-} } // namespace assembler::mips
+} } // namespace v8::internal
@@ -707,38 +973,34 @@ int Decoder::InstructionDecode(byte_* instr_ptr) {
namespace disasm {
-namespace v8i = v8::internal;
-
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
- static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
- v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
- return tmp_buffer.start();
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
+ return tmp_buffer_.start();
}
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
const char* NameConverter::NameOfCPURegister(int reg) const {
- return assembler::mips::Registers::Name(reg);
+ return v8::internal::Registers::Name(reg);
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
- return assembler::mips::FPURegister::Name(reg);
+ return v8::internal::FPURegisters::Name(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // MIPS does not have the concept of a byte register
+ UNREACHABLE(); // MIPS does not have the concept of a byte register.
return "nobytereg";
}
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
@@ -755,31 +1017,32 @@ Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte_* instruction) {
- assembler::mips::Decoder d(converter_, buffer);
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
- UNIMPLEMENTED_MIPS();
+// The MIPS assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
NameConverter converter;
Disassembler d(converter);
- for (byte_* pc = begin; pc < end;) {
+ for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
- byte_* prev_pc = pc;
+ byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
+
#undef UNSUPPORTED
} // namespace disasm
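
The rewritten decoder above is driven through the disasm::Disassembler front end: each InstructionDecode() call renders one 4-byte MIPS instruction into a caller-supplied buffer and returns Instruction::kInstrSize. A minimal usage sketch in the spirit of the Disassemble() helper in this file; the function name, the begin/end pointers, and the assumption that the v8 internal headers defining byte and EmbeddedVector are on the include path are illustrative, not part of the patch.

// Sketch: walk a range of generated MIPS code with the disassembler above.
#include <cstdio>
#include "v8.h"      // pulls in v8::internal::byte and EmbeddedVector
#include "disasm.h"  // disasm::NameConverter, disasm::Disassembler

static void DumpMipsCode(FILE* f,
                         v8::internal::byte* begin,
                         v8::internal::byte* end) {
  disasm::NameConverter converter;
  disasm::Disassembler d(converter);
  for (v8::internal::byte* pc = begin; pc < end;) {
    v8::internal::EmbeddedVector<char, 128> buffer;  // one line of output
    buffer[0] = '\0';
    v8::internal::byte* prev_pc = pc;
    pc += d.InstructionDecode(buffer, pc);  // advances by kInstrSize (4 bytes)
    fprintf(f, "%p  %08x  %s\n", prev_pc,
            *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
  }
}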
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index d63056299..faaa0e0f4 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,57 +37,8 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address sp = fp + ExitFrameConstants::kSPDisplacement;
- const int offset = ExitFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp + offset);
- bool is_debug_exit = code->IsSmi();
- if (is_debug_exit) {
- sp -= kNumJSCallerSaved * kPointerSize;
- }
- return sp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
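
For context on this one-line replacement: the exit frame now carries a saved copy of sp just below fp, so the frame walker reloads it instead of recomputing it from a fixed displacement plus a debug-exit adjustment. A sketch of the fp-relative layout implied by the ExitFrameConstants further down in this patch; the helper name is illustrative, and the sp slot is presumably written by MacroAssembler::EnterExitFrame, as the header comment suggests.

// fp-relative exit frame slots, per the constants in frames-mips.h below
// (one slot = kPointerSize):
//   fp + 2 : caller sp   (kCallerSPOffset)
//   fp + 1 : caller pc   (kCallerPCOffset)
//   fp + 0 : caller fp   (kCallerFPOffset)
//   fp - 1 : saved sp    (kSPOffset)        <- read back below
//   fp - 2 : code object (kCodeOffset)
//   fp - 3 : top of the extra allocated space (kStackSpaceOffset)
Address ExitFrameStackPointer(Address fp) {
  // Recovering sp is now a single load rather than displacement arithmetic.
  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}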
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 06e9979c2..2e720fb17 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,16 +40,17 @@ namespace internal {
static const int kNumRegs = 32;
static const RegList kJSCallerSaved =
+ 1 << 2 | // v0
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
-static const int kNumJSCallerSaved = 4;
+static const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
+// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
int JSCallerSavedCode(int n);
@@ -58,14 +59,63 @@ static const RegList kCalleeSaved =
// Saved temporaries.
1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
- // gp, sp, fp
+ // gp, sp, fp.
1 << 28 | 1 << 29 | 1 << 30;
static const int kNumCalleeSaved = 11;
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved;
+
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+static const int kUndefIndex = -1;
+// Maps register codes to the stack indexes of the corresponding saved registers.
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+ kUndefIndex,
+ kUndefIndex,
+ 0, // v0
+ kUndefIndex,
+ 1, // a0
+ 2, // a1
+ 3, // a2
+ 4, // a3
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ 5, // Saved temporaries.
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ kUndefIndex,
+ 13, // gp
+ 14, // sp
+ 15, // fp
+ kUndefIndex
+};
+
// ----------------------------------------------------
@@ -88,23 +138,24 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- // Exit frames have a debug marker on the stack.
- static const int kSPDisplacement = -1 * kPointerSize;
+ // See some explanation in MacroAssembler::EnterExitFrame.
+ // This marks the top of the extra allocated stack space.
+ static const int kStackSpaceOffset = -3 * kPointerSize;
- // The debug marker is just above the frame pointer.
- static const int kDebugMarkOffset = -1 * kPointerSize;
- // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
- static const int kSavedRegistersOffset = 0 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +1 * kPointerSize;
+ // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+ static const int kCallerSPOffset = +2 * kPointerSize;
+
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = +2 * kPointerSize;
};
@@ -123,9 +174,12 @@ class StandardFrameConstants : public AllStatic {
static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
// C/C++ argument slots size.
- static const int kCArgsSlotsSize = 4 * kPointerSize;
+ static const int kCArgSlotCount = 4;
+ static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
+ // Assembly builtins argument slots size.
+ static const int kBArgsSlotsSize = 0 * kPointerSize;
};
@@ -133,7 +187,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
@@ -159,6 +213,7 @@ inline Object* JavaScriptFrame::function_slot_object() const {
return Memory::Object_at(fp() + offset);
}
+
} } // namespace v8::internal
#endif
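
The safepoint constants added above define a fixed mapping from MIPS register codes to slots in the block of registers pushed at a safepoint: 16 slots are reserved (kNumSafepointRegisters), the 16 actually-saved registers (kJSCallerSaved plus kCalleeSaved) each get an index, and every other register maps to kUndefIndex. A small sketch of how such a lookup could be used, with example values read straight off the table; the helper name is illustrative and not an API introduced by this patch.

// Sketch: map a MIPS register code to its slot in the safepoint save area,
// using kSafepointRegisterStackIndexMap from frames-mips.h above.
int SafepointSlotForRegister(int reg_code) {
  ASSERT(reg_code >= 0 && reg_code < kNumRegs);
  int index = kSafepointRegisterStackIndexMap[reg_code];
  ASSERT(index != kUndefIndex);  // Only saved registers own a slot.
  return index;
}
// Example values taken from the table:
//   v0 (code 2)  -> slot 0
//   a0 (code 4)  -> slot 1
//   s0 (code 16) -> slot 5
//   fp (code 30) -> slot 15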
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 17ee531a3..5b9bbb578 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,73 +29,647 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+// Note on Mips implementation:
+//
+// The result_register() for mips is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
+#include "code-stubs.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "mips/code-stubs-mips.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
- UNIMPLEMENTED_MIPS();
+
+static unsigned GetPropertyId(Property* property) {
+ if (property->is_synthetic()) return AstNode::kNoNumber;
+ return property->id();
+}
+
+
+// A patch site is a location in the code that can be patched. This class
+// has a number of methods to emit the patchable code and a method,
+// EmitPatchInfo, to record a marker back to that code. The marker is an
+// andi at, rx, #yyy instruction, where x * 0x0000ffff + yyy (the raw 16-bit
+// immediate value) is the delta from the pc to the first instruction of
+// the patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() {
+ ASSERT(patch_site_.is_bound() == info_emitted_);
+ }
+
+  // When initially emitting this, ensure that a jump is always generated to skip
+ // the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Always taken before patched.
+ __ Branch(target, eq, at, Operand(zero_reg));
+ }
+
+  // When initially emitting this, ensure that a jump is never generated to skip
+ // the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ ASSERT(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ andi(at, reg, 0);
+ // Never taken before patched.
+ __ Branch(target, ne, at, Operand(zero_reg));
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+ __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o a1: the JS function object being called (ie, ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+ ASSERT(info_ == NULL);
+ info_ = info;
+ scope_ = info->scope();
+ SetFunctionPosition(function());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop-at");
+ }
+#endif
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). t1 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+
+ int locals_count = info->scope()->num_stack_slots();
+
+ __ Push(ra, fp, cp, a1);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(at);
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in a1.
+ __ push(a1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ }
+ function_in_register = false;
+ // Context is returned in both v0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ lw(a0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ li(a1, Operand(Context::SlotOffset(slot->index())));
+ __ addu(a2, cp, a1);
+ __ sw(a0, MemOperand(a2, 0));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(a2, cp);
+ __ RecordWrite(a2, a1, a3);
+ }
+ }
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(a3, a1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Addu(a2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(a1, Operand(Smi::FromInt(num_parameters)));
+ __ Push(a3, a2, a1);
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+  // The stub will rewrite the receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
+ __ CallStub(&stub);
+
+ Move(arguments->AsSlot(), v0, a1, a2);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+}
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+ Comment cmnt(masm_, "[ Stack check");
+ Label ok;
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(t0));
+ StackCheckStub stub;
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordStackCheck(stmt->OsrEntryId());
+
+ __ CallStub(&stub);
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
void FullCodeGenerator::EmitReturnSequence() {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in v0.
+ __ push(v0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ // Here we use masm_-> instead of the __ macro to avoid the code coverage
+ // tool from instrumenting as we rely on the code size here.
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+ __ RecordJSReturn();
+ masm_->mov(sp, fp);
+ masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+ masm_->Addu(sp, sp, Operand(sp_delta));
+ masm_->Jump(ra);
+ }
+
+#ifdef DEBUG
+ // Check that the size of the code used for returning is large enough
+ // for the debugger's requirements.
+ ASSERT(Assembler::kJSReturnSequenceInstructions <=
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
+ codegen()->Move(result_register(), slot);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
+ // For simplicity we always test the accumulator register.
+ codegen()->Move(result_register(), slot);
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ li(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ li(result_register(), Operand(lit));
+ __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ li(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ __ Drop(count);
+ __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ if (count > 1) __ Drop(count - 1);
+ __ sw(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ ASSERT(count > 0);
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ push(at);
+ __ Branch(&done);
+ __ bind(materialize_false);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ __ push(at);
+ __ bind(&done);
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ ASSERT(materialize_true == true_label_);
+ ASSERT(materialize_false == false_label_);
}
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- UNIMPLEMENTED_MIPS();
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
}
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(at, value_root_index);
+ __ push(at);
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+ true,
+ true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ Branch(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ Branch(false_label_);
+ }
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ mov(at, zero_reg);
+ } else {
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(result_register());
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ }
+ Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cc,
+ Register lhs,
+ const Operand& rhs,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ Branch(if_true, cc, lhs, rhs);
+ } else if (if_true == fall_through) {
+ __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+ } else {
+ __ Branch(if_true, cc, lhs, rhs);
+ __ Branch(if_false);
+ }
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return MemOperand(v0, 0);
}
void FullCodeGenerator::Move(Register destination, Slot* source) {
- UNIMPLEMENTED_MIPS();
+ // Use destination as scratch.
+ MemOperand slot_operand = EmitSlotSearch(source, destination);
+ __ lw(destination, slot_operand);
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+ Label skip;
+ if (should_normalize) __ Branch(&skip);
+
+ ForwardBailoutStack* current = forward_bailout_stack_;
+ while (current != NULL) {
+ PrepareForBailout(current->expr(), state);
+ current = current->parent();
+ }
+
+ if (should_normalize) {
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+ __ bind(&skip);
+ }
}
@@ -103,157 +677,3608 @@ void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ sw(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
+ }
+}
+
+
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function) {
+ Comment cmnt(masm_, "[ Declaration");
+ ASSERT(variable != NULL); // Must have been resolved.
+ Slot* slot = variable->AsSlot();
+ Property* prop = variable->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (mode == Variable::CONST) {
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ sw(t0, MemOperand(fp, SlotOffset(slot)));
+ } else if (function != NULL) {
+ VisitForAccumulatorValue(function);
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (FLAG_debug_code) {
+ // Check that we're not inside a with or catch context.
+ __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.",
+ a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.",
+ a1, Operand(t0));
+ }
+ if (mode == Variable::CONST) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ sw(at, ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (function != NULL) {
+ VisitForAccumulatorValue(function);
+ __ sw(result_register(), ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ // We know that we have written a function, which is not a smi.
+ __ mov(a1, cp);
+ __ RecordWrite(a1, Operand(offset), a2, result_register());
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ li(a2, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(mode == Variable::VAR ||
+ mode == Variable::CONST);
+ PropertyAttributes attr =
+ (mode == Variable::VAR) ? NONE : READ_ONLY;
+ __ li(a1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (mode == Variable::CONST) {
+ __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, a2, a1, a0);
+ } else if (function != NULL) {
+ __ Push(cp, a2, a1);
+ // Push initial value for function declaration.
+ VisitForStackValue(function);
+ } else {
+ ASSERT(Smi::FromInt(0) == 0);
+ // No initial value!
+ __ mov(a0, zero_reg); // Operand(Smi::FromInt(0)));
+ __ Push(cp, a2, a1, a0);
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ // A const declaration aliasing a parameter is an illegal redeclaration.
+ ASSERT(mode != Variable::CONST);
+ if (function != NULL) {
+ // We are declaring a function that rewrites to a property.
+ // Use (keyed) IC to set the initial value. We cannot visit the
+ // rewrite because it's shared and we risk recording duplicate AST
+ // IDs for bailouts from optimized code.
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+
+ __ push(result_register());
+ VisitForAccumulatorValue(function);
+ __ mov(a0, result_register());
+ __ pop(a2);
+
+ ASSERT(prop->key()->AsLiteral() != NULL &&
+ prop->key()->AsLiteral()->handle()->IsSmi());
+ __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ __ CallWithAstId(ic);
+ // Value in v0 is ignored (declarations are statements).
+ }
+ }
}
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNIMPLEMENTED_MIPS();
+ EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ li(a2, Operand(pairs));
+ __ li(a1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, a2, a1, a0);
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
+ // Return value is ignored.
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+ // The default is not a test, but remember it as final fall through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+ __ mov(a0, result_register()); // CompareStub requires args in a0, a1.
+
+ // Perform the comparison as if via '==='.
+ __ lw(a1, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ or_(a2, a1, a0);
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+ __ Branch(&next_test, ne, a1, Operand(a0));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetSourcePosition(clause->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ __ Branch(&next_test, ne, v0, Operand(zero_reg));
+ __ Drop(1); // Switch value is no longer needed.
+ __ Branch(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ Branch(nested_statement.break_target());
+ } else {
+ __ Branch(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_target());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt);
+
+ Label loop, exit;
+ ForIn loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // Get the object to enumerate over. Both SpiderMonkey and JSC
+ // ignore null and undefined in contrast to the specification; see
+ // ECMA-262 section 12.6.4.
+ VisitForAccumulatorValue(stmt->enumerable());
+ __ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(at));
+ Register null_value = t1;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ Branch(&exit, eq, a0, Operand(null_value));
+
+ // Convert the object to a JS object.
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ bind(&convert);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0);
+ __ bind(&done_convert);
+ __ push(a0);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = t2;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = t3;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ mov(a1, a0);
+ __ bind(&next);
+
+ // Check that there are no elements. Register a1 contains the
+ // current JS object we've reached through the prototype chain.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in a2 for the subsequent
+ // prototype load.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+ __ JumpIfSmi(a3, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (a3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(a3, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ __ Branch(&check_prototype, eq, a1, Operand(a0));
+ __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ Branch(&next, ne, a1, Operand(null_value));
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Branch(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(a0); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ mov(a2, v0);
+ __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kMetaMapRootIndex);
+ __ Branch(&fixed_array, ne, a1, Operand(at));
+
+ // We got a map in register v0. Get the enumeration cache from it.
+ __ bind(&use_cache);
+ __ LoadInstanceDescriptors(v0, a1);
+ __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
+ __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+ __ push(v0); // Map.
+ __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ li(a0, Operand(Smi::FromInt(0)));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(a2, a1, a0);
+ __ jmp(&loop);
+
+ // We got a fixed array in register v0. Iterate through that.
+ __ bind(&fixed_array);
+ __ li(a1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
+ __ Push(a1, v0);
+ __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ li(a0, Operand(Smi::FromInt(0)));
+ __ Push(a1, a0); // Fixed array length (as smi) and initial index.
+
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ // Load the current count to a0, load the length to a1.
+ __ lw(a0, MemOperand(sp, 0 * kPointerSize));
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(loop_statement.break_target(), hs, a0, Operand(a1));
+
+ // Get the current entry of the array into register a3.
+ __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(t0, a2, t0); // Array base + scaled (smi) index.
+ __ lw(a3, MemOperand(t0)); // Current entry.
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case into register a2.
+ __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we have to filter the key.
+ Label update_each;
+ __ lw(a1, MemOperand(sp, 4 * kPointerSize));
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&update_each, eq, t0, Operand(a2));
+
+ // Convert the entry to a string or (smi) 0 if it isn't a property
+ // any more. If the property has been removed while iterating, we
+ // just skip it.
+ __ push(a1); // Enumerable.
+ __ push(a3); // Current entry.
+ __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+ __ mov(a3, result_register());
+ __ Branch(loop_statement.continue_target(), eq, a3, Operand(zero_reg));
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register a3.
+ __ bind(&update_each);
+ __ mov(result_register(), a3);
+ // Perform the assignment as if via '='.
+ { EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->AssignmentId());
+ }
+
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+ // the index (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_target());
+ __ pop(a0);
+ __ Addu(a0, a0, Operand(Smi::FromInt(1)));
+ __ push(a0);
+
+ EmitStackCheck(stmt);
+ __ Branch(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_target());
+ __ Drop(5);
+
+ // Exit and decrement the loop depth.
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ !pretenure &&
+ scope()->is_function_scope() &&
+ info->num_literals() == 0) {
+ FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+ __ li(a0, Operand(info));
+ __ push(a0);
+ __ CallStub(&stub);
+ } else {
+ __ li(a0, Operand(info));
+ __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+ : Heap::kFalseValueRootIndex);
+ __ Push(cp, a0, a1);
+ __ CallRuntime(Runtime::kNewClosure, 3);
+ }
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var());
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow) {
+ Register current = cp;
+ Register next = a1;
+ Register temp = a2;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ // Load next context in chain.
+ __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at global context.
+ __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+ __ Branch(&fast, eq, temp, Operand(t0));
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Load next context in chain.
+ __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ Branch(&loop);
+ __ bind(&fast);
+ }
+
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(slot->var()->name()));
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ CallWithAstId(ic, mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Label* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Register context = cp;
+ Register next = a3;
+ Register temp = t0;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+ }
+ __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is NULL.
+ __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ Branch(slow, ne, temp, Operand(zero_reg));
+
+ // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+ __ Branch(done);
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
+ Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+ if (potential_slot != NULL) {
+ // Generate fast case for locals that rewrite to slots.
+ __ lw(v0, ContextSlotOperandCheckExtensions(potential_slot, slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move.
+ }
+ __ Branch(done);
+ } else if (rewrite != NULL) {
+ // Generate fast case for calls of an argument function.
+ Property* property = rewrite->AsProperty();
+ if (property != NULL) {
+ VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+ Literal* key_literal = property->key()->AsLiteral();
+ if (obj_proxy != NULL &&
+ key_literal != NULL &&
+ obj_proxy->IsArguments() &&
+ key_literal->handle()->IsSmi()) {
+ // Load arguments object if there are no eval-introduced
+ // variables. Then load the argument from the arguments
+ // object using keyed load.
+ __ lw(a1,
+ ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
+ slow));
+ __ li(a0, Operand(key_literal->handle()));
+ Handle<Code> ic =
+ isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Branch(done);
+ }
+ }
+ }
+ }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in a2 and the global
+ // object (receiver) in a0.
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(var->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ context()->Plug(v0);
+
+ } else if (slot->type() == Slot::LOOKUP) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ Comment cmnt(masm_, "Lookup slot");
+ __ li(a1, Operand(var->name()));
+ __ Push(cp, a1); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ bind(&done);
+
+ context()->Plug(v0);
+
+ } else {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ if (var->mode() == Variable::CONST) {
+ // Constants may be the hole value if they have not been initialized.
+ // Unhole them.
+ MemOperand slot_operand = EmitSlotSearch(slot, a0);
+ __ lw(v0, slot_operand);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move.
+ context()->Plug(v0);
+ } else {
+ context()->Plug(slot);
+ }
+ }
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label materialized;
+ // Registers will be used as follows:
+ // t1 = materialized value (RegExp literal)
+ // t0 = JS function, literals array
+ // a3 = literal index
+ // a2 = RegExp pattern
+ // a1 = RegExp flags
+ // a0 = RegExp literal clone
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ lw(t1, FieldMemOperand(t0, literal_offset));
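+  // If the literal slot already holds something other than undefined, the
+  // RegExp has been materialized and the runtime call can be skipped.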
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&materialized, ne, t1, Operand(at));
+
+ // Create regexp literal using runtime function.
+ // Result will be in v0.
+ __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a2, Operand(expr->pattern()));
+ __ li(a1, Operand(expr->flags()));
+ __ Push(t0, a3, a2, a1);
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(t1, v0);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(t1);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ pop(t1);
+
+ __ bind(&allocated);
+
+ // After this, registers are used as follows:
+ // v0: Newly allocated regexp.
+ // t1: Materialized regexp.
+ // a2: temp.
+ __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->constant_properties()));
+ int flags = expr->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ li(a0, Operand(Smi::FromInt(flags)));
+ __ Push(a3, a2, a1, a0);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in v0.
+ bool result_saved = false;
+
+ // Mark all computed expressions that are bound to a key that
+ // is shadowed by a later occurrence of the same key. For the
+ // marked expressions, no store code is emitted.
+ expr->CalculateEmitStore();
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(v0); // Save result on stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ __ mov(a0, result_register());
+ __ li(a2, Operand(key->handle()));
+ __ lw(a1, MemOperand(sp));
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id());
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ __ li(a0, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ push(a0);
+ __ CallRuntime(Runtime::kSetProperty, 4);
+ } else {
+ __ Drop(3);
+ }
+ break;
+ case ObjectLiteral::Property::GETTER:
+ case ObjectLiteral::Property::SETTER:
+ // Duplicate receiver on stack.
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ VisitForStackValue(key);
+ __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(a1);
+ VisitForStackValue(value);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ }
+ }
+
+ if (expr->has_function()) {
+ ASSERT(result_saved);
+ __ lw(a0, MemOperand(sp));
+ __ push(a0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
}
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+ __ mov(a0, result_register());
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->constant_elements()));
+ __ Push(a3, a2, a1);
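+  // Constant elements backed by a copy-on-write array can be shared with the
+  // clone; otherwise choose the runtime call or the shallow-clone stub based
+  // on literal depth and length.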
+ if (expr->constant_elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ __ CallStub(&stub);
+ __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+ 1, a1, a2);
+ } else if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(
+ FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+ __ CallStub(&stub);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < length; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(v0);
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ // Store the subexpression value in the array's elements.
+ __ lw(a1, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ sw(result_register(), FieldMemOperand(a1, offset));
+
+ // Update the write barrier for the array store with v0 as the scratch
+ // register.
+ __ li(a2, Operand(offset));
+ // TODO(PJ): double check this RecordWrite call.
+ __ RecordWrite(a1, a2, result_register());
+
+ PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(v0);
+ }
}
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Assignment");
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // on the left-hand side.
+ if (!expr->target()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->target());
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* property = expr->target()->AsProperty();
+ if (property != NULL) {
+ assign_type = (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ push(result_register());
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case KEYED_PROPERTY:
+ // We need the key and receiver on both the stack and in v0 and a1.
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
+ __ lw(a1, MemOperand(sp, 0));
+ __ push(v0);
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ { AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ __ push(v0); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
+ SetSourcePosition(expr->position() + 1);
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(),
+ op,
+ mode,
+ expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op, mode);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(a0, result_register());
+ __ li(a2, Operand(key->handle()));
+  // Call load IC. It has arguments receiver and property name in a0 and a2.
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- UNIMPLEMENTED_MIPS();
+ SetSourcePosition(prop->position());
+ __ mov(a0, result_register());
+ // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+
+ // Get the arguments.
+ Register left = a1;
+ Register right = a0;
+ __ pop(left);
+ __ mov(a0, result_register());
+
+ // Perform combined smi check on both operands.
+ __ Or(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ BinaryOpStub stub(op, mode);
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the
+  // type-recording binary operation stub; see
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
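+  // Note that the shift cases (SAR, SHL, SHR) branch to the stub
+  // unconditionally, so their inline paths below are not taken.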
+ switch (op) {
+ case Token::SAR:
+ __ Branch(&stub_call);
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ srav(right, left, scratch1);
+ __ And(v0, right, Operand(~kSmiTagMask));
+ break;
+ case Token::SHL: {
+ __ Branch(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ sllv(scratch1, scratch1, scratch2);
+ __ Addu(scratch2, scratch1, Operand(0x40000000));
+ __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::SHR: {
+ __ Branch(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srlv(scratch1, scratch1, scratch2);
+ __ And(scratch2, scratch1, 0xc0000000);
+ __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+ __ SmiTag(v0, scratch1);
+ break;
+ }
+ case Token::ADD:
+ __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::SUB:
+ __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+ __ BranchOnOverflow(&stub_call, scratch1);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(scratch1, right);
+ __ Mult(left, scratch1);
+ __ mflo(scratch1);
+ __ mfhi(scratch2);
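+      // Check for 32-bit overflow: the high word of the product must equal
+      // the sign extension of the low word, otherwise fall back to the stub.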
+ __ sra(scratch1, scratch1, 31);
+ __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+ __ mflo(v0);
+ __ Branch(&done, ne, v0, Operand(zero_reg));
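+      // The product is zero. If either operand is negative the result should
+      // be -0, which smis cannot represent, so defer to the stub.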
+ __ Addu(scratch2, right, left);
+ __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+ ASSERT(Smi::FromInt(0) == 0);
+ __ mov(v0, zero_reg);
+ break;
+ }
+ case Token::BIT_OR:
+ __ Or(v0, left, Operand(right));
+ break;
+ case Token::BIT_AND:
+ __ And(v0, left, Operand(right));
+ break;
+ case Token::BIT_XOR:
+ __ Xor(v0, left, Operand(right));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ OverwriteMode mode) {
+ __ mov(a0, result_register());
+ __ pop(a1);
+ BinaryOpStub stub(op, mode);
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+ // Invalid left-hand sides are rewritten to have a 'throw
+ // ReferenceError' on the left-hand side.
+ if (!expr->IsValidLeftHandSide()) {
+ VisitForEffect(expr);
+ return;
+ }
+
+ // Left-hand side can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->AsProperty();
+ if (prop != NULL) {
+ assign_type = (prop->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ mov(a1, result_register());
+ __ pop(a0); // Restore value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ __ CallWithAstId(ic);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ push(result_register()); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(a1, result_register());
+ __ pop(a2);
+ __ pop(a0); // Restore value.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ __ CallWithAstId(ic);
+ break;
+ }
+ }
+ PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- UNIMPLEMENTED_MIPS();
+ Token::Value op) {
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->AsSlot() != NULL);
+
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in a0, variable name in
+ // a2, and the global object in a1.
+ __ mov(a0, result_register());
+ __ li(a2, Operand(var->name()));
+ __ lw(a1, GlobalObjectOperand());
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ // Detect const reinitialization by checking for the hole value.
+ __ lw(a1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a1, Operand(t0));
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT:
+ case Slot::LOOKUP:
+ __ push(result_register());
+ __ li(a0, Operand(slot->var()->name()));
+ __ Push(cp, a0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
+ Slot* slot = var->AsSlot();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ // Perform the assignment.
+ __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, a1);
+ // Perform the assignment and issue the write barrier.
+ __ sw(result_register(), target);
+ // RecordWrite may destroy all its register arguments.
+ __ mov(a3, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(a1, Operand(offset), a2, a3);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ // Call the runtime for the assignment.
+ __ push(v0); // Value.
+ __ li(a1, Operand(slot->var()->name()));
+ __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(cp, a1, a0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ break;
+ }
+ }
}
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ lw(t0, MemOperand(sp, kPointerSize)); // Receiver is now under value.
+ __ push(t0);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register()); // Load the value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load receiver to a1. Leave a copy on the stack if needed for turning the
+ // receiver into fast case.
+ if (expr->ends_initialization_block()) {
+ __ lw(a1, MemOperand(sp));
+ } else {
+ __ pop(a1);
+ }
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(v0); // Result of assignment, saved even if not needed.
+ // Receiver is under the result value.
+ __ lw(t0, MemOperand(sp, kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(v0);
+ __ Drop(1);
+ }
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- UNIMPLEMENTED_MIPS();
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ lw(t0, MemOperand(sp, 2 * kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ // Call keyed store IC.
+ // The arguments are:
+ // - a0 is the value,
+ // - a1 is the key,
+ // - a2 is the receiver.
+ __ mov(a0, result_register());
+ __ pop(a1); // Key.
+  // Load receiver to a2. Leave a copy on the stack if needed for turning the
+ // receiver into fast case.
+ if (expr->ends_initialization_block()) {
+ __ lw(a2, MemOperand(sp));
+ } else {
+ __ pop(a2);
+ }
+
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(v0); // Result of assignment, saved even if not needed.
+ // Receiver is under the result value.
+ __ lw(t0, MemOperand(sp, kPointerSize));
+ __ push(t0);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(v0);
+ __ Drop(1);
+ }
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitProperty(Property* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ VisitForAccumulatorValue(expr->obj());
+ EmitNamedPropertyLoad(expr);
+ context()->Plug(v0);
+ } else {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ pop(a1);
+ EmitKeyedPropertyLoad(expr);
+ context()->Plug(v0);
+ }
}
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
+ Handle<Object> name,
RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ __ li(a2, Operand(name));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+ __ CallWithAstId(ic, mode, expr->id());
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ // Swap the name of the function and the receiver on the stack to follow
+ // the calling convention for call ICs.
+ __ pop(a1);
+ __ push(v0);
+ __ push(a1);
+
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+ __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0); // Drop the key still on the stack.
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ { PreservePositionScope scope(masm()->positions_recorder());
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, flags);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+ } else {
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ }
+ __ push(a1);
+
+ // Push the receiver of the enclosing function and do runtime call.
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
+ __ push(a1);
+ // Push the strict mode flag.
+ __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(a1);
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
}
void FullCodeGenerator::VisitCall(Call* expr) {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ // We want to verify that RecordJSReturnSite gets called on all paths
+ // through this function. Avoid early returns.
+ expr->return_is_recorded_ = false;
+#endif
+
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to
+ // resolve the function we need to call and the receiver of the
+ // call. Then we call the resolved function using the given
+ // arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ { PreservePositionScope pos_scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ push(a2); // Reserved receiver slot.
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(v0);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
+ }
+
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ push(a1);
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
+ }
+
+ // The runtime call returns a pair of values in v0 (function) and
+ // v1 (receiver). Touch up the stack with the right values.
+ __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ CallStub(&stub);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, v0);
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Push global object as receiver for the call IC.
+ __ lw(a0, GlobalObjectOperand());
+ __ push(a0);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->AsSlot() != NULL &&
+ var->AsSlot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot (dynamically introduced variable).
+ Label slow, done;
+
+ { PreservePositionScope scope(masm()->positions_recorder());
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow,
+ &done);
+ }
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in v0)
+ // and the object holding it (returned in v1).
+ __ push(context_register());
+ __ li(a2, Operand(var->name()));
+ __ push(a2);
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ Push(v0, v1); // Function, receiver.
+
+ // If fast case code has been generated, emit code to push the
+ // function and receiver and have the slow path jump around this
+ // code.
+ if (done.is_linked()) {
+ Label call;
+ __ Branch(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(v0);
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ __ push(a1);
+ __ bind(&call);
+ }
+
+ // The receiver is either the global receiver or an object found
+ // by LoadContextSlot. That object could be the hole if the
+ // receiver is implicitly the global object.
+ EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property.
+ // For a synthetic property use keyed load IC followed by function call,
+ // for a regular property use EmitKeyedCallWithIC.
+ if (prop->is_synthetic()) {
+ // Do not visit the object and key subexpressions (they are shared
+ // by all occurrences of the same rewritten parameter).
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+ Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+ MemOperand operand = EmitSlotSearch(slot, a1);
+ __ lw(a1, operand);
+
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+ __ li(a0, Operand(prop->key()->AsLiteral()->handle()));
+
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ lw(a1, GlobalObjectOperand());
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ Push(v0, a1); // Function, receiver.
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ } else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(prop->obj());
+ }
+ EmitKeyedCallWithIC(expr, prop->key());
+ }
+ }
+ } else {
+ { PreservePositionScope scope(masm()->positions_recorder());
+ VisitForStackValue(fun);
+ }
+ // Load global receiver object.
+ __ lw(a1, GlobalObjectOperand());
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ push(a1);
+ // Emit function call.
+ EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+ }
+
+#ifdef DEBUG
+ // RecordJSReturnSite should have been called.
+ ASSERT(expr->return_is_recorded_);
+#endif
}
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function and argument count into a1 and a0.
+ __ li(a0, Operand(arg_count));
+ __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+ Handle<Code> construct_builtin =
+ isolate()->builtins()->JSConstructCall();
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ And(t0, v0, Operand(kSmiTagMask));
+ Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
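+  // A non-negative smi has both the smi tag bit and the sign bit clear.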
+ __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+ Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(if_false, ne, at, Operand(zero_reg));
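+  // Finally, check that the instance type is in the non-callable spec
+  // object range.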
+ __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ if (FLAG_debug_code) __ AbortIfSmi(v0);
+
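+  // If the map has already been marked as safe for default valueOf, the
+  // answer is true without further checks.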
+ __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ Branch(if_true, ne, t0, Operand(zero_reg));
+
+ // Check for fast case object. Generate false result for slow case object.
+ __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
+ __ Branch(if_false, eq, a2, Operand(t0));
+
+ // Look for valueOf symbol in the descriptor array, and indicate false if
+ // found. The type is not checked, so if it is a transition it is a false
+ // negative.
+ __ LoadInstanceDescriptors(a1, t0);
+ __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ // t0: descriptor array
+ // a3: length of descriptor array
+ // Calculate the end of the descriptor array.
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kPointerSize == 4);
+ __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a2, a2, t1);
+
+ // Calculate location of the first key name.
+ __ Addu(t0,
+ t0,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+ DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf, the result is false.
+  Label entry, loop;
+  // The use of t2 to store the valueOf symbol assumes that it is not otherwise
+  // used in the loop below.
+ __ li(t2, Operand(FACTORY->value_of_symbol()));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ lw(a3, MemOperand(t0, 0));
+ __ Branch(if_false, eq, a3, Operand(t2));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&entry);
+ __ Branch(&loop, ne, t0, Operand(a2));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+ __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ __ JumpIfSmi(a2, if_false);
+ __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ Branch(if_false, ne, a2, Operand(a3));
+
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf, and set the result to true.
+ __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+ __ jmp(if_true);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a2);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_false);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_ARRAY_TYPE),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Get the frame pointer for the calling frame.
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&check_frame_marker, ne,
+ a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+ if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ pop(a1);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // ArgumentsAccessStub expects the key in a1 and the formal
+ // parameter count in a0.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a1, v0);
+ __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label exit;
+ // Get the number of formal parameters.
+ __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&exit, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame.
+ __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+ __ bind(&exit);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is a smi, we return null.
+ __ JumpIfSmi(v0, &null);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ GetObjectType(v0, v0, a1); // Map is now in v0.
+ __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+
+ // Check if the constructor in the map is a function.
+ __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+ // v0 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Branch(&done);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ jmp(&done);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+ __ jmp(&done);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+ // Conditionally generate a log call.
+ // Args:
+ // 0 (literal string): The type of logging (corresponds to the flags).
+ // This is used to determine whether or not to generate the log call.
+ // 1 (string): Format string. Access the string at argument index 2
+ // with '%2s' (see Logger::LogRuntime for all the formats).
+ // 2 (array): Arguments to the format string.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ // Finally, we're expected to leave a value on the top of the stack.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ Label slow_allocate_heapnumber;
+ Label heapnumber_allocated;
+
+ // Save the new heap number in callee-saved register s0, since
+ // we call out to external C code below.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+
+ // Allocate a heap number.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(s0, v0); // Save result in s0, so it is saved through the CFunc call.
+
+ __ bind(&heapnumber_allocated);
+
+ // Convert 32 random bits in v0 to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
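+ // In IEEE-754 terms: 0x41300000 in the high word encodes an unbiased
+ // exponent of 20 (0x413 - 0x3FF), and the 32 random bits r fill the low
+ // mantissa word, so the double equals 2^20 * (1 + r/2^52) = 2^20 + r/2^32.
+ // Subtracting 2^20 leaves r/2^32, which lies in [0, 1).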
+ if (CpuFeatures::IsSupported(FPU)) {
+ __ PrepareCallCFunction(1, a0);
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+
+ CpuFeatures::Scope scope(FPU);
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a1, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a1);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a1);
+ // Subtract and store the result in the heap number.
+ __ sub_d(f0, f12, f14);
+ __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ mov(v0, s0);
+ } else {
+ __ PrepareCallCFunction(2, a0);
+ __ mov(a0, s0);
+ __ li(a1, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
+ }
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ SubStringStub stub;
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the stub.
+ RegExpExecStub stub;
+ ASSERT(args->length() == 4);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ VisitForStackValue(args->at(3));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(v0, &done);
+ // If the object is not a value type, return the object.
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+ __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+ // Load the arguments on the stack and call the runtime function.
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ MathPowStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0)); // Load the object.
+ VisitForAccumulatorValue(args->at(1)); // Load the value.
+ __ pop(a1); // v0 = value. a1 = object.
+
+ Label done;
+ // If the object is a smi, return the value.
+ __ JumpIfSmi(a1, &done);
+
+ // If the object is not a value type, return the value.
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+ // Store the value.
+ __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+ // Update the write barrier. Save the value as it will be
+ // overwritten by the write barrier code and is needed afterward.
+ __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 1);
+
+ // Load the argument on the stack and call the stub.
+ VisitForStackValue(args->at(0));
+
+ NumberToStringStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(v0, a1);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(a1);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register scratch = a2;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+ __ mov(a0, result_register());
+
+ Register object = a1;
+ Register index = a0;
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+ Register result = v0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ li(result, Operand(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ StringCompareStub stub;
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the stub.
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ mov(a0, result_register()); // Stub requires parameter in a0 and on tos.
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+ // Load the argument on the stack and call the runtime function.
+ ASSERT(args->length() == 1);
+ VisitForStackValue(args->at(0));
+ __ CallRuntime(Runtime::kMath_sqrt, 1);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+ ASSERT(args->length() >= 2);
+
+ int arg_count = args->length() - 2; // 2 ~ receiver and function.
+ for (int i = 0; i < arg_count + 1; i++) {
+ VisitForStackValue(args->at(i));
+ }
+ VisitForAccumulatorValue(args->last()); // Function.
+
+ // InvokeFunction requires the function in a1. Move it in there.
+ __ mov(a1, result_register());
+ ParameterCount count(arg_count);
+ __ InvokeFunction(a1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+ RegExpConstructResultStub stub;
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 3);
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+ VisitForStackValue(args->at(2));
+ Label done;
+ Label slow_case;
+ Register object = a0;
+ Register index1 = a1;
+ Register index2 = a2;
+ Register elements = a3;
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ __ lw(object, MemOperand(sp, 2 * kPointerSize));
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
+ // Map is now in scratch1.
+
+ __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
+
+ // Check the object's elements are in fast case and writable.
+ __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
+
+ // Check that both indices are smis.
+ __ lw(index1, MemOperand(sp, 1 * kPointerSize));
+ __ lw(index2, MemOperand(sp, 0));
+ __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+ // Check that both indices are valid.
+ Label not_hi;
+ __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+ __ Branch(&slow_case, ls, scratch1, Operand(index1));
+ __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
+ __ Branch(&slow_case, ls, scratch1, Operand(index2));
+ __ bind(&not_hi);
+
+ // Bring the address of the elements into index1 and index2.
+ __ Addu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
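+ // The indices are still smis (value << 1), so shifting left by
+ // kPointerSizeLog2 - kSmiTagSize = 1 more bit scales them to byte
+ // offsets of value * kPointerSize.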
+ __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(index1, scratch1, index1);
+ __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(index2, scratch1, index2);
+
+ // Swap elements.
+ __ lw(scratch1, MemOperand(index1, 0));
+ __ lw(scratch2, MemOperand(index2, 0));
+ __ sw(scratch1, MemOperand(index2, 0));
+ __ sw(scratch2, MemOperand(index1, 0));
+
+ Label new_space;
+ __ InNewSpace(elements, scratch1, eq, &new_space);
+ // Possible optimization: do a check that both values are Smis
+ // (or them and test against Smi mask).
+
+ __ mov(scratch1, elements);
+ __ RecordWriteHelper(elements, index1, scratch2);
+ __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+
+ __ bind(&new_space);
+ // We are done. Drop elements from the stack, and return undefined.
+ __ Drop(3);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
+ __ CallRuntime(Runtime::kSwapElements, 3);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ ASSERT_NE(NULL, args->at(0)->AsLiteral());
+ int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+ Handle<FixedArray> jsfunction_result_caches(
+ isolate()->global_context()->jsfunction_result_caches());
+ if (jsfunction_result_caches->length() <= cache_id) {
+ __ Abort("Attempt to use undefined cache.");
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ context()->Plug(v0);
+ return;
+ }
+
+ VisitForAccumulatorValue(args->at(1));
+
+ Register key = v0;
+ Register cache = a1;
+ __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+ __ lw(cache,
+ ContextOperand(
+ cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ lw(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+
+ Label done, not_found;
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // a2 now holds finger offset as a smi.
+ __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // a3 now points to the start of fixed array elements.
+ __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(a3, a3, at);
+ // a3 now points to key of indexed element of cache.
+ __ lw(a2, MemOperand(a3));
+ __ Branch(&not_found, ne, key, Operand(a2));
+
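+ // Cache hit: entries are stored as (key, value) pairs, so the cached
+ // value sits one pointer slot past the matching key.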
+ __ lw(v0, MemOperand(a3, kPointerSize));
+ __ Branch(&done);
+
+ __ bind(&not_found);
+ // Call runtime to perform the lookup.
+ __ Push(cache, key);
+ __ CallRuntime(Runtime::kGetFromCache, 2);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Register right = v0;
+ Register left = a1;
+ Register tmp = a2;
+ Register tmp2 = a3;
+
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1)); // Result (right) in v0.
+ __ pop(left);
+
+ Label done, fail, ok;
+ __ Branch(&ok, eq, left, Operand(right));
+ // Fail if either is a non-HeapObject.
+ __ And(tmp, left, Operand(right));
+ __ And(at, tmp, Operand(kSmiTagMask));
+ __ Branch(&fail, eq, at, Operand(zero_reg));
+ __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+ __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Branch(&fail, ne, tmp, Operand(tmp2));
+ __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
+ __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
+ __ Branch(&ok, eq, tmp, Operand(tmp2));
+ __ bind(&fail);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&ok);
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(v0);
+ }
+
+ __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+ __ IndexFromHash(v0, v0);
+
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ Label bailout, done, one_char_separator, long_separator,
+ non_trivial_array, not_size_one_array, loop,
+ empty_separator_loop, one_char_separator_loop,
+ one_char_separator_loop_entry, long_separator_loop;
+
+ ASSERT(args->length() == 2);
+ VisitForStackValue(args->at(1));
+ VisitForAccumulatorValue(args->at(0));
+
+ // All aliases of the same register have disjoint lifetimes.
+ Register array = v0;
+ Register elements = no_reg; // Will be v0.
+ Register result = no_reg; // Will be v0.
+ Register separator = a1;
+ Register array_length = a2;
+ Register result_pos = no_reg; // Will be a2.
+ Register string_length = a3;
+ Register string = t0;
+ Register element = t1;
+ Register elements_end = t2;
+ Register scratch1 = t3;
+ Register scratch2 = t5;
+ Register scratch3 = t4;
+ Register scratch4 = v1;
+
+ // Separator operand is on the stack.
+ __ pop(separator);
+
+ // Check that the array is a JSArray.
+ __ JumpIfSmi(array, &bailout);
+ __ GetObjectType(array, scratch1, scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+ // Check that the array has fast elements.
+ __ CheckFastElements(scratch1, scratch2, &bailout);
+
+ // If the array has length zero, return the empty string.
+ __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+ __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ Branch(&done);
+
+ __ bind(&non_trivial_array);
+
+ // Get the FixedArray containing array's elements.
+ elements = array;
+ __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ array = no_reg; // End of array's live range.
+
+ // Check that all array elements are sequential ASCII strings, and
+ // accumulate the sum of their lengths, as a smi-encoded value.
+ __ mov(string_length, zero_reg);
+ __ Addu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(elements_end, array_length, kPointerSizeLog2);
+ __ Addu(elements_end, element, elements_end);
+ // Loop condition: while (element < elements_end).
+ // Live values in registers:
+ // elements: Fixed array of strings.
+ // array_length: Length of the fixed array of strings (not smi)
+ // separator: Separator string
+ // string_length: Accumulated sum of string lengths (smi).
+ // element: Current array element.
+ // elements_end: Array end.
+ if (FLAG_debug_code) {
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+ array_length, Operand(zero_reg));
+ }
+ __ bind(&loop);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ JumpIfSmi(string, &bailout);
+ __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
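+ // Both lengths are smis, so a 32-bit signed overflow of the tagged add
+ // below is exactly a smi-range overflow; bail out in that case.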
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+ __ Branch(&loop, lt, element, Operand(elements_end));
+
+ // If array_length is 1, return elements[0], a string.
+ __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+ __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ Branch(&done);
+
+ __ bind(&not_size_one_array);
+
+ // Live values in registers:
+ // separator: Separator string
+ // array_length: Length of the array.
+ // string_length: Sum of string lengths (smi).
+ // elements: FixedArray of strings.
+
+ // Check that the separator is a flat ASCII string.
+ __ JumpIfSmi(separator, &bailout);
+ __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+ // Add (separator length times array_length) - separator length to the
+ // string_length to get the length of the result string. array_length is not
+ // smi but the other values are, so the result is a smi.
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ Subu(string_length, string_length, Operand(scratch1));
+ __ Mult(array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ mfhi(scratch2);
+ __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+ __ mflo(scratch2);
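+ // Bit 31 must also be clear: the low word is the smi-tagged product, and
+ // a valid non-negative smi keeps its sign bit zero.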
+ __ And(scratch3, scratch2, Operand(0x80000000));
+ __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
+ __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+ __ BranchOnOverflow(&bailout, scratch3);
+ __ SmiUntag(string_length);
+
+ // Get first element in the array to free up the elements register to be used
+ // for the result.
+ __ Addu(element,
+ elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ result = elements; // End of live range for elements.
+ elements = no_reg;
+ // Live values in registers:
+ // element: First array element
+ // separator: Separator string
+ // string_length: Length of result string (not smi)
+ // array_length: Length of the array.
+ __ AllocateAsciiString(result,
+ string_length,
+ scratch1,
+ scratch2,
+ elements_end,
+ &bailout);
+ // Prepare for looping. Set up elements_end to end of the array. Set
+ // result_pos to the position of the result where to write the first
+ // character.
+ __ sll(elements_end, array_length, kPointerSizeLog2);
+ __ Addu(elements_end, element, elements_end);
+ result_pos = array_length; // End of live range for array_length.
+ array_length = no_reg;
+ __ Addu(result_pos,
+ result,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ // Check the length of the separator.
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ li(at, Operand(Smi::FromInt(1)));
+ __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+ __ Branch(&long_separator, gt, scratch1, Operand(at));
+
+ // Empty separator case.
+ __ bind(&empty_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+
+ // Copy next array element to the result.
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // One-character separator case.
+ __ bind(&one_char_separator);
+ // Replace separator with its ascii character value.
+ __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ // Jump into the loop after the code that copies the separator, so the first
+ // element is not preceded by a separator.
+ __ jmp(&one_char_separator_loop_entry);
+
+ __ bind(&one_char_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Single separator ascii char (in lower byte).
+
+ // Copy the separator character to the result.
+ __ sb(separator, MemOperand(result_pos));
+ __ Addu(result_pos, result_pos, 1);
+
+ // Copy next array element to the result.
+ __ bind(&one_char_separator_loop_entry);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ // Long separator case (separator is more than one character). Entry is at the
+ // label long_separator below.
+ __ bind(&long_separator_loop);
+ // Live values in registers:
+ // result_pos: the position to which we are currently copying characters.
+ // element: Current array element.
+ // elements_end: Array end.
+ // separator: Separator string.
+
+ // Copy the separator to the result.
+ __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string,
+ separator,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+
+ __ bind(&long_separator);
+ __ lw(string, MemOperand(element));
+ __ Addu(element, element, kPointerSize);
+ __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ SmiUntag(string_length);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ // End while (element < elements_end).
+ __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+ ASSERT(result.is(v0));
+ __ Branch(&done);
+
+ __ bind(&bailout);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into v0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ lw(a1, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
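+ // The compiler hints field is stored as a smi, so each flag's bit
+ // position is shifted left by kSmiTagSize when tested here.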
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Test for native function.
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Not native or strict-mode function.
+ __ Branch(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- UNIMPLEMENTED_MIPS();
+ Handle<String> name = expr->name();
+ if (name->length() > 0 && name->Get(0) == '_') {
+ Comment cmnt(masm_, "[ InlineRuntimeCall");
+ EmitInlineRuntimeCall(expr);
+ return;
+ }
+
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ lw(a0, GlobalObjectOperand());
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
+ __ push(a0);
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ __ li(a2, Operand(expr->name()));
+ RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+ Handle<Code> ic =
+ isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP,
+ mode);
+ __ CallWithAstId(ic, mode, expr->id());
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ }
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* prop = expr->expression()->AsProperty();
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+
+ if (prop != NULL) {
+ if (prop->is_synthetic()) {
+ // Result of deleting parameters is false, even when they rewrite
+ // to accesses on the arguments object.
+ context()->Plug(false);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(a1);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ }
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ if (var->is_global()) {
+ __ lw(a2, GlobalObjectOperand());
+ __ li(a1, Operand(var->name()));
+ __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ Push(a2, a1, a0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(v0);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ li(a2, Operand(var->name()));
+ __ push(a2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(v0);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ { StackValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ CallRuntime(Runtime::kTypeof, 1);
+ context()->Plug(v0);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForAccumulatorValue(expr->expression());
+ Label no_conversion;
+ __ JumpIfSmi(result_register(), &no_conversion);
+ __ mov(a0, result_register());
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+ __ bind(&no_conversion);
+ context()->Plug(result_register());
+ break;
+ }
+
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+ break;
+
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ UnaryOpStub stub(expr->op(), overwrite);
+ // UnaryOpStub expects the argument to be in a0.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register());
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ context()->Plug(v0);
}
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CountOperation");
+ SetSourcePosition(expr->position());
+
+ // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+ // as the left-hand side.
+ if (!expr->expression()->IsValidLeftHandSide()) {
+ VisitForEffect(expr->expression());
+ return;
+ }
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ li(at, Operand(Smi::FromInt(0)));
+ __ push(at);
+ }
+ if (assign_type == NAMED_PROPERTY) {
+ // Put the object both on the stack and in the accumulator.
+ VisitForAccumulatorValue(prop->obj());
+ __ push(v0);
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ lw(a1, MemOperand(sp, 0));
+ __ push(v0);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // We need a second deoptimization point after loading the value
+ // in case evaluating the property load may have a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(expr->CountId(), TOS_REG);
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ JumpIfSmi(v0, &no_conversion);
+ __ mov(a0, v0);
+ ToNumberStub convert_stub;
+ __ CallStub(&convert_stub);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(v0);
+ break;
+ case NAMED_PROPERTY:
+ __ sw(v0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ }
+ }
+ __ mov(a0, result_register());
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ li(a1, Operand(Smi::FromInt(count_value)));
+
+ if (ShouldInlineSmiCase(expr->op())) {
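+ // Smi::FromInt(+/-1) is the tagged value +/-2, so adding it to the smi
+ // in a0 adjusts the untagged value by exactly one.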
+ __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+ __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
+
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ patch_site.EmitJumpIfSmi(v0, &done);
+ __ bind(&stub_call);
+ }
+
+ // Record position before stub call.
+ SetSourcePosition(expr->position());
+
+ BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in v0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ { EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(v0);
+ }
+ // For all contexts except EffectContext we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(v0);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ li(a2, Operand(prop->key()->AsLiteral()->handle())); // Name.
+ __ pop(a1); // Receiver.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->StoreIC_Initialize_Strict()
+ : isolate()->builtins()->StoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ __ mov(a0, result_register()); // Value.
+ __ pop(a1); // Key.
+ __ pop(a2); // Receiver.
+ Handle<Code> ic = is_strict_mode()
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate()->builtins()->KeyedStoreIC_Initialize();
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(v0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ lw(a0, GlobalObjectOperand());
+ __ li(a2, Operand(proxy->name()));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ CallWithAstId(ic);
+ PrepareForBailout(expr, TOS_REG);
+ context()->Plug(v0);
+ } else if (proxy != NULL &&
+ proxy->var()->AsSlot() != NULL &&
+ proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+ Label done, slow;
+
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ Slot* slot = proxy->var()->AsSlot();
+ EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ __ li(a0, Operand(proxy->name()));
+ __ Push(cp, a0);
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ PrepareForBailout(expr, TOS_REG);
+ __ bind(&done);
+
+ context()->Plug(v0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitInCurrentContext(expr);
+ }
+}
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ { AccumulatorValueContext context(this);
+ VisitForTypeofValue(expr);
+ }
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ if (check->Equals(isolate()->heap()->number_symbol())) {
+ __ JumpIfSmi(v0, if_true);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => false.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg),
+ if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ LoadRoot(at, Heap::kFalseValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ __ JumpIfSmi(v0, if_false);
+ // Check for undetectable objects => true.
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else if (check->Equals(isolate()->heap()->function_symbol())) {
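+ // Callable spec object types sit at the top of the instance type range
+ // (see the STATIC_ASSERTs in EmitClassOf), so one >= check suffices.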
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, v0); // Leave map in a1.
+ Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
+
+ } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ __ JumpIfSmi(v0, if_false);
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_true, eq, v0, Operand(at));
+ // Check for JS objects => true.
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ // Check for undetectable objects => false.
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ } else {
+ if (if_false != fall_through) __ jmp(if_false);
+ }
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNIMPLEMENTED_MIPS();
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetSourcePosition(expr->position());
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+ context()->Plug(if_true, if_false);
+ return;
+ }
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForStackValue(expr->right());
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ // The stub returns 0 for true.
+ Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cc = eq;
+ bool strict = false;
+ switch (op) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through.
+ case Token::EQ:
+ cc = eq;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ mov(a1, result_register());
+ __ pop(a0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ mov(a1, result_register());
+ __ pop(a0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ mov(a0, result_register());
+ __ pop(a1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ Or(a2, a0, Operand(a1));
+ patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+ Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+ // Record position and call the compare IC.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ Comment cmnt(masm_, "[ CompareToNull");
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ VisitForAccumulatorValue(expr->expression());
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ __ mov(a0, result_register());
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ if (expr->is_strict()) {
+ Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+ } else {
+ __ Branch(if_true, eq, a0, Operand(a1));
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ Branch(if_true, eq, a0, Operand(a1));
+ __ And(at, a0, Operand(kSmiTagMask));
+ __ Branch(if_false, eq, at, Operand(zero_reg));
+ // It can be an undetectable object.
+ __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+ __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNIMPLEMENTED_MIPS();
+ __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(v0);
}
-Register FullCodeGenerator::result_register() { return v0; }
+Register FullCodeGenerator::result_register() {
+ return v0;
+}
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+ return cp;
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- UNIMPLEMENTED_MIPS();
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ sw(value, MemOperand(fp, frame_offset));
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- UNIMPLEMENTED_MIPS();
+ __ lw(dst, ContextOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ li(at, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(at);
}
@@ -261,12 +4286,28 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!result_register().is(a1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+ // Cook return address in link register to stack (smi encoded Code* delta).
+ __ Subu(a1, ra, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
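+ // With a zero tag and a one-bit shift, smi-tagging is just doubling, so
+ // a1 + a1 stores the code offset as a smi rather than a raw address.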
+ __ Addu(a1, a1, Operand(a1)); // Convert to smi.
+ __ push(a1);
}
void FullCodeGenerator::ExitFinallyBlock() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!result_register().is(a1));
+ // Restore result register from stack.
+ __ pop(a1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ sra(a1, a1, 1); // Un-smi-tag value.
+ __ Addu(at, a1, Operand(masm_->CodeObject()));
+ __ Jump(at);
}
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index e5c2ad80c..cbae8e46e 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,8 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
+#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -47,38 +48,568 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+ Register type,
+ Label* global_object) {
+ // Register usage:
+ // type: holds the receiver instance type on entry.
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register elements,
+ Register scratch0,
+ Register scratch1,
+ Label* miss) {
+ // Register usage:
+ // receiver: holds the receiver on entry and is unchanged.
+ // elements: holds the property dictionary on fall through.
+ // Scratch registers:
+ // scratch0: used to hold the receiver map.
+ // scratch1: used to hold the receiver instance type, receiver bit mask
+ // and elements map.
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // Check that the receiver is a valid JS object.
+ __ GetObjectType(receiver, scratch0, scratch1);
+ __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // If this assert fails, we have to check upper bound too.
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+
+ GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+ // Check that the global object does not require access checks.
+ __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+ __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+ (1 << Map::kHasNamedInterceptor)));
+ __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+ __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+ // done.
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as elements or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry check that the value is a normal
+ // property.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
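+ // Each dictionary entry spans three pointers (key, value, details); the
+ // value and details words follow the entry's key at +1 and +2 slots.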
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at,
+ scratch1,
+ Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ __ lw(result,
+ FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+// label is done.
+// name: Property name. It is not clobbered if a jump to the miss label is
+ // done.
+// value: The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register name,
+ Register value,
+ Register scratch1,
+ Register scratch2) {
+ // Main use of the scratch registers.
+ // scratch1: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch2: Used as temporary.
+ Label done;
+
+ // Probe the dictionary.
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+ miss,
+ &done,
+ elements,
+ name,
+ scratch1,
+ scratch2);
+
+ // If probing finds an entry in the dictionary check that the value
+ // is a normal property that is not read only.
+ __ bind(&done); // scratch2 == elements + 4 * index.
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kTypeAndReadOnlyMask
+ = (PropertyDetails::TypeField::mask() |
+ PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+ __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Store the value at the masked, scaled index and return.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ sw(value, MemOperand(scratch2));
+
+ // Update the write barrier. Make sure not to clobber the value.
+ __ mov(scratch1, value);
+ __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'elements'.
+  //            Unchanged on bailout so 'key' or 'elements' can be used
+  //            in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ __ nor(reg1, reg0, zero_reg);
+ __ sll(at, reg0, 15);
+ __ addu(reg0, reg1, at);
+
+ // hash = hash ^ (hash >> 12);
+ __ srl(at, reg0, 12);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ __ sll(at, reg0, 2);
+ __ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ __ srl(at, reg0, 4);
+ __ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ __ li(reg1, Operand(2057));
+ __ mul(reg0, reg0, reg1);
+
+ // hash = hash ^ (hash >> 16);
+ __ srl(at, reg0, 16);
+ __ xor_(reg0, reg0, at);
+
+ // Compute the capacity mask.
+ __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ __ sra(reg1, reg1, kSmiTagSize);
+ __ Subu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ __ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ __ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ __ sll(at, reg2, 1); // 2x.
+ __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ __ sll(at, reg2, kPointerSizeLog2);
+ __ addu(reg2, elements, at);
+
+ __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ if (i != kProbes - 1) {
+ __ Branch(&done, eq, key, Operand(at));
+ } else {
+ __ Branch(miss, ne, key, Operand(at));
+ }
+ }
+
+ __ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
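+  // NORMAL properties have a type field of zero, so any set type bits in the
+  // details word mean the property is not a plain data property.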
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ __ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ __ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+ support_wrappers);
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+ Register receiver,
+ Register map,
+ Register scratch,
+ int interceptor_bit,
+ Label* slow) {
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, slow);
+ // Get the map of the receiver.
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check bit field.
+ __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ Branch(slow, ne, at, Operand(zero_reg));
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_fast_array,
+ Label* out_of_range) {
+ // Register use:
+ //
+ // receiver - holds the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // elements - holds the elements of the receiver on exit.
+ //
+ // result - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+ // Unchanged on bailout so 'receiver' and 'key' can be safely
+ // used by further computation.
+ //
+ // Scratch registers:
+ //
+ // scratch1 - used to hold elements map and elements length.
+ // Holds the elements map if not_fast_array branch is taken.
+ //
+ // scratch2 - used to hold the loaded value.
+
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ if (not_fast_array != NULL) {
+ // Check that the object is in fast mode (not dictionary).
+ __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(not_fast_array, ne, scratch1, Operand(at));
+ } else {
+ __ AssertFastElements(elements);
+ }
+
+ // Check that the key (index) is within bounds.
+ __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+ // Fast case: Do the load.
+ __ Addu(scratch1, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
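+  // A smi is the value shifted left by kSmiTagSize, so shifting the key left
+  // by (kPointerSizeLog2 - kSmiTagSize) turns it directly into a byte offset.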
+ __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(at, at, scratch1);
+ __ lw(scratch2, MemOperand(at));
+
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ Branch(out_of_range, eq, scratch2, Operand(at));
+ __ mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+ Register key,
+ Register map,
+ Register hash,
+ Label* index_string,
+ Label* not_symbol) {
+ // The key is not a smi.
+ // Is it a string?
+ __ GetObjectType(key, map, hash);
+ __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+ // Is the string an array index, with cached numeric value?
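+  // If all bits in kContainsCachedArrayIndexMask are zero, the hash field
+  // also caches the string's numeric array index.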
+ __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+ __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+ __ Branch(index_string, eq, at, Operand(zero_reg));
+
+ // Is the string a symbol?
+ // map: key map
+ __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ And(at, hash, Operand(kIsSymbolMask));
+ __ Branch(not_symbol, eq, at, Operand(zero_reg));
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+ int argc,
+ Code::Kind kind,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a1 : receiver
+ // -- a2 : name
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(kind,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ extra_ic_state,
+ NORMAL,
+ argc);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, we use the map of the prototype objects for
+ // the corresponding JSValue for the cache and that is what we need
+ // to probe.
+ //
+ // Check for number.
+ __ JumpIfSmi(a1, &number, t1);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, a1);
+ __ Branch(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&boolean, eq, a1, Operand(t0));
+ __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t1));
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ __ bind(&miss);
}
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+ int argc,
+ Label* miss,
+ Register scratch) {
+ // a1: function
+
+ // Check that the value isn't a smi.
+ __ JumpIfSmi(a1, miss);
+
+ // Check that the value is a JSFunction.
+ __ GetObjectType(a1, scratch, scratch);
+ __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- UNIMPLEMENTED_MIPS();
- // Registers:
- // a2: name
- // ra: return address
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+ // a0: elements
+ // Search the dictionary - put result in register a1.
+ GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+ GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm,
+ int argc,
+ IC::UtilityId id,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ if (id == IC::kCallIC_Miss) {
+ __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+ } else {
+ __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+ }
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
@@ -86,123 +617,1130 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
__ EnterInternalFrame();
// Push the receiver and the name of the function.
- __ MultiPush(a2.bit() | a3.bit());
+ __ Push(a3, a2);
// Call the entry.
__ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1);
__ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
+ // Move result to a1 and leave the internal frame.
__ mov(a1, v0);
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ andi(t0, a2, kSmiTagMask);
- __ Branch(eq, &invoke, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, a3);
- __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
+ // This can happen only for regular CallIC but not KeyedCallIC.
+ if (id == IC::kCallIC_Miss) {
+ Label invoke, global;
+ __ lw(a2, MemOperand(sp, argc * kPointerSize));
+ __ andi(t0, a2, kSmiTagMask);
+ __ Branch(&invoke, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, a3);
+ __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc * kPointerSize));
+ __ bind(&invoke);
+ }
// Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
ParameterCount actual(argc);
- __ bind(&invoke);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ call_kind);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
}
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+ int argc,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+ GenerateMiss(masm, argc, extra_ic_state);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallNormal(masm, argc);
+ GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack into a1.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ Label do_call, slow_call, slow_load, slow_reload_receiver;
+ Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+ Label index_smi, index_string;
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a2, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+ GenerateFastArrayLoad(
+ masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+ __ bind(&do_call);
+ // receiver in a1 is not used after this point.
+ // a2: key
+ // a1: function
+
+ GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+ __ bind(&check_number_dictionary);
+ // a2: key
+ // a3: elements map
+ // t0: elements pointer
+ // Check whether the elements is a number dictionary.
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow_load, ne, a3, Operand(at));
+ __ sra(a0, a2, kSmiTagSize);
+ // a0: untagged index
+ GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+ __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&slow_load);
+ // This branch is taken when calling KeyedCallIC_Miss is neither required
+ // nor beneficial.
+ __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+ __ EnterInternalFrame();
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ __ LeaveInternalFrame();
+ __ mov(a1, v0);
+ __ jmp(&do_call);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+ // The key is known to be a symbol.
+ // If the receiver is a regular JS object with slow properties then do
+ // a quick inline probe of the receiver's dictionary.
+ // Otherwise do the monomorphic cache probe.
+ GenerateKeyedLoadReceiverCheck(
+ masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+ __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+ GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+ __ jmp(&do_call);
+
+ __ bind(&lookup_monomorphic_cache);
+ __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+ GenerateMonomorphicCacheProbe(masm,
+ argc,
+ Code::KEYED_CALL_IC,
+ Code::kNoExtraICState);
+ // Fall through on miss.
+
+ __ bind(&slow_call);
+ // This branch is taken if:
+ // - the receiver requires boxing or access check,
+ // - the key is neither smi nor symbol,
+ // - the value loaded is not a function,
+  //  - there is hope that the runtime will create a monomorphic call stub
+  //    that will get fetched next time.
+ __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+ GenerateMiss(masm, argc);
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, a2);
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Check if the name is a string.
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+ __ IsObjectJSStringType(a2, a0, &miss);
+
+ GenerateCallNormal(masm, argc);
+ __ bind(&miss);
+ GenerateMiss(masm, argc);
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a0, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra    : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+ // a1: elements
+ GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+ __ Ret();
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+ __ mov(a3, a0);
+ __ Push(a3, a2);
+
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+ __ TailCallExternalReference(ref, 2, 1);
}
-void LoadIC::ClearInlinedVersion(Address address) {}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- return false;
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Check that the key is a positive smi.
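+  // The mask combines the smi tag bit and the sign bit, so the branch below
+  // is taken for non-smi keys as well as for negative smis.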
+  __ And(scratch1, key, Operand(0x80000001));
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
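+  // The first two slots of the parameter map hold the context and the
+  // backing store, so mapped entries start two pointers past the header;
+  // kHeapObjectTag is subtracted because a raw address is computed below.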
+
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ Addu(scratch3, scratch3, Operand(kOffset));
+
+ __ Addu(scratch2, scratch1, scratch3);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch2, scratch1, scratch3);
+ return MemOperand(scratch2);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+ __ li(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ Addu(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, backing_store, scratch);
+ return MemOperand(scratch);
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- return false;
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+  // -- ra     : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
+ __ lw(v0, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
+ __ lw(a2, unmapped_location);
+  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ Branch(&slow, eq, a2, Operand(a3));
+ __ mov(v0, a2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+  // -- ra     : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
+ __ sw(a0, mapped_location);
+  // Verify that mapped_location is a plain register MemOperand (zero offset).
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
+ __ sw(a0, unmapped_location);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- return false;
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+  // -- ra     : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
+ __ lw(a1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
+ __ lw(a1, unmapped_location);
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow, eq, a1, Operand(a3));
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
}
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Isolate* isolate = masm->isolate();
+
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+
+ __ Push(a1, a0);
+
+ // Perform tail call to the entry.
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+ : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, check_string, index_smi, index_string, property_array_property;
+ Label probe_dictionary, check_number_dictionary;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ Isolate* isolate = masm->isolate();
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
+
+ GenerateKeyedLoadReceiverCheck(
+ masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(a2, a3, &check_number_dictionary);
+
+ GenerateFastArrayLoad(
+ masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+ __ Ret();
+
+ __ bind(&check_number_dictionary);
+ __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+ // Check whether the elements is a number dictionary.
+ // a0: key
+ // a3: elements map
+ // t0: elements
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&slow, ne, a3, Operand(at));
+ __ sra(a2, a0, kSmiTagSize);
+ GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+ __ Ret();
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+ 1,
+ a2,
+ a3);
+ GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_string);
+ GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+ GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+ __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ sra(at, t0, String::kHashShift);
+ __ xor_(a3, a3, at);
+ __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
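+  // Roughly: index = ((map >> kMapHashShift) ^ (hash_field >> kHashShift))
+  //                  & kCapacityMask.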
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys =
+ ExternalReference::keyed_lookup_cache_keys(isolate);
+ __ li(t0, Operand(cache_keys));
+ __ sll(at, a3, kPointerSizeLog2 + 1);
+ __ addu(t0, t0, at);
+  __ lw(t1, MemOperand(t0));  // Load the cached map.
+  __ Addu(t0, t0, Operand(kPointerSize));  // Advance t0 to the symbol slot.
+ __ Branch(&slow, ne, a2, Operand(t1));
+ __ lw(t1, MemOperand(t0));
+ __ Branch(&slow, ne, a0, Operand(t1));
+
+ // Get field offset.
+ // a0 : key
+ // a1 : receiver
+ // a2 : receiver's map
+ // a3 : lookup cache index
+ ExternalReference cache_field_offsets =
+ ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+ __ li(t0, Operand(cache_field_offsets));
+ __ sll(at, a3, kPointerSizeLog2);
+ __ addu(at, t0, at);
+ __ lw(t1, MemOperand(at));
+ __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+ __ Subu(t1, t1, t2);
+ __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
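+  // t1 holds the field index minus the number of in-object properties, which
+  // is negative for in-object fields; adding it to the instance size (in
+  // words) gives the field's word index from the start of the object.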
+ // Load in-object property.
+ __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ addu(t2, t2, t1); // Index from start of object.
+ __ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ sll(at, t2, kPointerSizeLog2);
+ __ addu(at, a1, at);
+ __ lw(v0, MemOperand(at));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ // Load property array property.
+ __ bind(&property_array_property);
+ __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+ __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ sll(t0, t1, kPointerSizeLog2);
+ __ Addu(t0, t0, a1);
+ __ lw(v0, MemOperand(t0));
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // a1: receiver
+ // a0: key
+ // a3: elements
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+ // Load the property to v0.
+ GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+ 1,
+ a2,
+ a3);
+ __ Ret();
+
+ __ bind(&index_string);
+ __ IndexFromHash(a3, key);
+ // Now jump to the place where smi keys are handled.
+ __ Branch(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key (index)
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Register receiver = a1;
+ Register index = a0;
+ Register scratch1 = a2;
+ Register scratch2 = a3;
+ Register result = v0;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &miss, // When index out of range.
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&miss);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, fast, array, extra, exit;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ Register elements = a3; // Elements array of the receiver.
+  // t0 is used as ip in the ARM version.
+ // t3-t4 are used as temporaries.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &slow);
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(receiver, &slow);
+
+ // Get the map of the object.
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+ // Check if the object is a JS array or not.
+ __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+ __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Object case: Check key against length in the elements array.
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // Check that the object is in fast mode and writable.
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+ // Check array bounds. Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to slow if the untagged index >= length.
+
+ // Slow case, handle jump to runtime.
+ __ bind(&slow);
+
+ // Entry registers are intact.
+ // a0: value.
+ // a1: key.
+ // a2: receiver.
+
+ GenerateRuntimeSetProperty(masm, strict_mode);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+
+ __ bind(&extra);
+ // Only support writing to array[array.length].
+ __ Branch(&slow, ne, key, Operand(t0));
+ // Check for room in the elements backing store.
+ // Both the key and the length of FixedArray are smis.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&slow, hs, key, Operand(t0));
+ // Calculate key + 1 as smi.
+ ASSERT_EQ(0, kSmiTag);
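+  // With a zero smi tag, adding the tagged constant Smi::FromInt(1) bumps the
+  // untagged length by exactly one.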
+ __ Addu(t3, key, Operand(Smi::FromInt(1)));
+ __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode (and writable); if it
+ // is the length is always a smi.
+
+ __ bind(&array);
+ __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&slow, ne, t3, Operand(t0));
+
+ // Check the key against the length in the array.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&extra, hs, key, Operand(t0));
+ // Fall through to fast case.
+
+ __ bind(&fast);
+ // Fast case, store the value to the elements backing store.
+ __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t4, t4, Operand(t1));
+ __ sw(value, MemOperand(t4));
+ // Skip write barrier if the written value is a smi.
+ __ JumpIfSmi(value, &exit);
+
+ // Update write barrier for the elements array address.
+ __ Subu(t3, t4, Operand(elements));
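+  // t3 is the byte offset of the stored slot within the elements object;
+  // RecordWrite takes the object plus this offset to find the updated slot.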
+
+ __ RecordWrite(elements, Operand(t3), t4, t5);
+ __ bind(&exit);
+
+ __ mov(v0, a0); // Return the value written.
+ __ Ret();
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &slow);
+
+ // Check that the key is an array index, that is Uint32.
+ __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+ // Get the map of the receiver.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+ // Check that it has indexed interceptor and access checks
+ // are not enabled for this object.
+ __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+ __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+ // Everything is fine, call runtime.
+ __ Push(a1, a0); // Receiver, key.
+
+ // Perform tail call to the entry.
+ __ TailCallExternalReference(ExternalReference(
+ IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+ __ bind(&slow);
+ GenerateMiss(masm, false);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(a2, a1, a0);
+
+ ExternalReference ref = force_generic
+ ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+ masm->isolate())
+ : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ // We can't use MultiPush as the order of the registers is important.
+ __ Push(a2, a1, a0);
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ ExternalReference ref =
+ ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+ __ TailCallExternalReference(ref, 3, 1);
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC,
+ strict_mode);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, a1, a2, a3, t0, t1);
+
+ // Cache miss: Jump to runtime.
+ GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+ // Perform tail call to the entry.
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, i.e. anything with
+  // elements of FixedArray type), but it is currently restricted to JSArray.
+  // The value must be a number; only smis are accepted as they are the most
+  // common case.
+
+ Label miss;
+
+ Register receiver = a1;
+ Register value = a0;
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+ masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ GenerateMiss(masm);
}
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+ GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ __ Push(a1, a2, a0);
+
+ __ li(a1, Operand(Smi::FromInt(NONE))); // PropertyAttributes.
+ __ li(a0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(a1, a0);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+}
+
+
#undef __
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
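+      // For example, "x > y" is handled as "y < x" with the operands swapped.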
+ return lt;
+ case Token::LTE:
+ // Reverse left and right operands to obtain ECMA-262 conversion order.
+ return ge;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address());
+ }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ if (!Assembler::IsAndImmediate(instr)) {
+ return;
+ }
+
+  // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+ int delta = Assembler::GetImmediate16(instr);
+ delta += Assembler::GetRs(instr) * kImm16Mask;
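+  // The delta is split between the rs field and the 16-bit immediate
+  // (delta == rs * kImm16Mask + imm16) so it can exceed the 16-bit range.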
+ // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+ // signals that nothing was inlined.
+ if (delta == 0) {
+ return;
+ }
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ address, andi_instruction_address, delta);
+ }
+#endif
+
+ Address patch_address =
+ andi_instruction_address - delta * Instruction::kInstrSize;
+ Instr instr_at_patch = Assembler::instr_at(patch_address);
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
+ ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+ ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+ ASSERT(Assembler::IsBranch(branch_instr));
+ if (Assembler::IsBeq(branch_instr)) {
+ // This is patching a "jump if not smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, eq, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, ne, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(ne);
+ } else {
+ ASSERT(Assembler::IsBne(branch_instr));
+ // This is patching a "jump if smi" site to be active.
+ // Changing:
+ // andi at, rx, 0
+ // Branch <target>, ne, at, Operand(zero_reg)
+ // to:
+ // andi at, rx, #kSmiTagMask
+ // Branch <target>, eq, at, Operand(zero_reg)
+ CodePatcher patcher(patch_address, 2);
+ Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+ patcher.masm()->andi(at, reg, kSmiTagMask);
+ patcher.ChangeBranchCondition(eq);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc
deleted file mode 100644
index 408f75e79..000000000
--- a/deps/v8/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There already a frame expectation at the target.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Use the current frame as the expected one at the target if necessary.
- if (entry_frame_ == NULL) {
- entry_frame_ = cgen()->frame();
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- } else {
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- }
-
- // The predicate is_linked() should be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- if (!is_linked()) {
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
- }
- }
- __ b(&entry_label_);
- __ nop(); // Branch delay slot nop.
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- // If there is a current frame we can use it on the fall through.
- if (entry_frame_ == NULL) {
- entry_frame_ = new VirtualFrame(cgen()->frame());
- } else {
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_ != NULL);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // The predicate is_linked() should be made false. Its implementation
- // detects the presence (or absence) of frame pointers in the
- // reaching_frames_ list. If we inserted a bogus frame to make
- // is_linked() true, remove it now.
- if (is_linked()) {
- reaching_frames_.Clear();
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // On ARM we do not currently emit merge code for jumps, so we need to do
- // it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->Drop(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- // On ARM we do not currently emit merge code at binding sites, so we need
- // to do it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- cgen()->frame()->Drop(count);
- }
-
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/register-allocator-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index e056fb807..2aec68456 100644
--- a/deps/v8/src/mips/register-allocator-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,22 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#include "mips/constants-mips.h"
+#include "mips/lithium-mips.h"
+
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
-class RegisterAllocatorConstants : public AllStatic {
+// Forward declarations.
+class LDeferredCode;
+
+class LCodeGen BASE_EMBEDDED {
public:
- static const int kNumRegisters = assembler::mips::kNumRegisters;
- static const int kInvalidRegister = assembler::mips::kInvalidRegister;
-};
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode() {
+ UNIMPLEMENTED();
+ return false;
+ }
-} } // namespace v8::internal
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
-#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+} } // namespace v8::internal
+#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
new file mode 100644
index 000000000..ebc1e43bf
--- /dev/null
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -0,0 +1,307 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_MIPS_H_
+#define V8_MIPS_LITHIUM_MIPS_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+ LInstruction() { }
+ virtual ~LInstruction() { }
+
+  // Predicates should be generated by a macro as in lithium-ia32.h.
+ virtual bool IsLabel() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+ virtual bool IsOsrEntry() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ LPointerMap* pointer_map() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasPointerMap() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+
+ LEnvironment* environment() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasEnvironment() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+
+ virtual bool IsControl() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void MarkAsCall() { UNIMPLEMENTED(); }
+ void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ bool IsMarkedAsSaveDoubles() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual bool HasResult() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual LOperand* result() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int InputCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* InputAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int TempCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* TempAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* FirstInput() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* Output() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+#ifdef DEBUG
+ void VerifyCall() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LGap: public LInstruction {
+ public:
+ explicit LGap(HBasicBlock* block) { }
+
+ HBasicBlock* block() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+};
+
+
+class LLabel: public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+ // Function could be generated by a macro as in lithium-ia32.h.
+ static LOsrEntry* cast(LInstruction* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand** SpilledRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+ LOperand** SpilledDoubleRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(HGraph* graph) { }
+
+ HGraph* graph() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ const ZoneList<LPointerMap*>* pointer_maps() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* GetNextSpillSlot(bool double_slot) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LConstantOperand* DefineConstantOperand(HConstant* constant) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LLabel* GetLabel(int block_id) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ const ZoneList<LInstruction*>* instructions() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ int GetParameterStackSlot(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+ LGap* GetGapAt(int index) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool IsGapAt(int index) const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ int NearestGapPos(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+ CompilationInfo* info() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+#ifdef DEBUG
+ void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+ LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
+
+ // Build the sequence for the graph.
+ LChunk* Build() {
+ UNIMPLEMENTED();
+ return NULL;
+ };
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+ UNIMPLEMENTED(); \
+ return NULL; \
+ }
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_MIPS_H_
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index e096028e3..7c085baac 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,84 +25,109 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
- unresolved_(0),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
+// Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
-void MacroAssembler::Jump(Register target, Condition cond,
- Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), bd); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target, rmode), cond, r1, r2);
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), bd); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
+} \
+void MacroAssembler::Name(byte* target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Call(Register target,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
-}
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)
-
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target, rmode), cond, r1, r2);
-}
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY
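+// For reference, REGISTER_TARGET_BODY(Jump) above expands to two overloads
+// roughly equivalent to:
+//   void MacroAssembler::Jump(Register target, BranchDelaySlot bd) {
+//     Jump(Operand(target), bd);
+//   }
+//   void MacroAssembler::Jump(Register target, Condition cond, Register r1,
+//                             const Operand& r2, BranchDelaySlot bd) {
+//     Jump(Operand(target), cond, r1, r2, bd);
+//   }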
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+void MacroAssembler::Ret(BranchDelaySlot bd) {
+ Jump(Operand(ra), bd);
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(ra), cond, r1, r2);
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(Operand(ra), cond, r1, r2, bd);
}
@@ -111,51 +136,324 @@ void MacroAssembler::LoadRoot(Register destination,
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
- Branch(NegateCondition(cond), 2, src1, src2);
+ Branch(2, NegateCondition(cond), src1, src2);
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::RecordWrite(Register object, Register offset,
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
+ Branch(2, NegateCondition(cond), src1, src2);
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (emit_debug_code()) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, &not_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(&not_in_new_space);
+ }
+
+ // Calculate page address: Clear bits from 0 to kPageSizeBits.
+ if (mips32r2) {
+ Ins(object, zero_reg, 0, kPageSizeBits);
+ } else {
+    // The Ins macro is slow on MIPS32R1, so use shifts instead.
+ srl(object, object, kPageSizeBits);
+ sll(object, object, kPageSizeBits);
+ }
+
+ // Calculate region number.
+ Ext(address, address, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
+
+ // Mark region dirty.
+ lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ li(at, Operand(1));
+ sllv(at, at, address);
+ or_(scratch, scratch, at);
+ sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+ // Safepoints expect a block of kNumSafepointRegisters values on the
+ // stack, so adjust the stack for unsaved registers.
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ ASSERT(num_unsaved >= 0);
+ Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+ const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+ MultiPop(kSafepointSavedRegisters);
+ Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ PushSafepointRegisters();
+ Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+}
+
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ FPURegister reg = FPURegister::FromAllocationIndex(i);
+ ldc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+ Register dst) {
+ sw(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ sw(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ lw(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+ // The registers are pushed starting with the highest encoding,
+ // which means that lowest encodings are closest to the stack pointer.
+ return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // General purpose registers are pushed last on the stack.
+ int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+ return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ ASSERT(cc == eq || cc == ne);
+ And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ Branch(branch, cc, scratch,
+ Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch0, eq, &done);
+
+ // Add offset into the object.
+ Addu(scratch0, object, offset);
+
+ // Record the actual write.
+ RecordWriteHelper(object, scratch0, scratch1);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
Register scratch) {
- UNIMPLEMENTED_MIPS();
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support.
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(at));
+ ASSERT(!scratch.is(at));
+
+ // Load current lexical context from the stack frame.
+ lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Check(ne, "we should not have an empty lexical context",
+ scratch, Operand(zero_reg));
+#endif
+
+ // Load the global context of the current context.
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ lw(scratch, FieldMemOperand(scratch, offset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check the context is a global context.
+ if (emit_debug_code()) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the global_context_map.
+ lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ Branch(&same_contexts, eq, scratch, Operand(at));
+
+ // Check the context is a global context.
+ if (emit_debug_code()) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ push(holder_reg); // Temporarily save holder on the stack.
+ mov(holder_reg, at); // Move at to its holding place.
+ LoadRoot(at, Heap::kNullValueRootIndex);
+ Check(ne, "JSGlobalProxy::context() should not be null.",
+ holder_reg, Operand(at));
+
+ lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+ // Restore at is not needed. at is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore at to holder's context.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ lw(scratch, FieldMemOperand(scratch, token_offset));
+ lw(at, FieldMemOperand(at, token_offset));
+ Branch(miss, ne, scratch, Operand(at));
+
+ bind(&same_contexts);
}
// ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- add(rd, rs, rt.rm());
+ addu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addi(rd, rs, rt.imm32_);
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- add(rd, rs, at);
+ addu(rd, rs, at);
}
}
}
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- addu(rd, rs, rt.rm());
+ subu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- addu(rd, rs, at);
+ subu(rd, rs, at);
}
}
}
@@ -225,7 +523,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -241,7 +539,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -257,7 +555,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -281,11 +579,20 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
}
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+ ASSERT(rt.is_reg());
+ ASSERT(!at.is(rs));
+ ASSERT(!at.is(rt.rm()));
+ li(at, -1);
+ xor_(rs, rt.rm(), at);
+}
+
+
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -301,7 +608,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -313,60 +620,61 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::movn(Register rd, Register rt) {
- addiu(at, zero_reg, -1); // Fill at with ones.
- xor_(rd, rt, at);
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+ if (mips32r2) {
+ if (rt.is_reg()) {
+ rotrv(rd, rs, rt.rm());
+ } else {
+ rotr(rd, rs, rt.imm32_);
+ }
+ } else {
+ if (rt.is_reg()) {
+ subu(at, zero_reg, rt.rm());
+ sllv(at, rs, at);
+ srlv(rd, rs, rt.rm());
+ or_(rd, rd, at);
+ } else {
+ if (rt.imm32_ == 0) {
+ srl(rd, rs, 0);
+ } else {
+ srl(at, rs, rt.imm32_);
+ sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+ or_(rd, rd, at);
+ }
+ }
+ }
}
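+// Worked example for the non-R2 immediate path: rotating rs = 0x80000001
+// right by 1 gives srl -> 0x40000000, sll -> 0x80000000, and the final or_
+// produces 0xC0000000, the 32-bit right-rotation of rs by one bit.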
+//------------Pseudo-instructions-------------
+
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
-
- if (!MustUseAt(j.rmode_) && !gen2instr) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode_) && !gen2instr) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
+ } else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
- lui(rd, (HIMask & j.imm32_) >> 16);
+ } else if (!(j.imm32_ & kImm16Mask)) {
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
} else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseAt(j.rmode_) || gen2instr) {
- if (MustUseAt(j.rmode_)) {
+ } else if (MustUseReg(j.rmode_) || gen2instr) {
+ if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
// We need always the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
- if (is_int16(j.imm32_)) {
- nop();
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
- nop();
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
- nop();
- lui(rd, (HIMask & j.imm32_) >> 16);
- } else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
- }
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
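+// For illustration (assuming no relocation info and gen2instr == false),
+// li(t0, Operand(42)) emits a single addiu, while
+// li(t0, Operand(0x12345678)) emits lui(t0, 0x1234) then ori(t0, t0, 0x5678).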
-// Exception-generating instructions and debugging support
-void MacroAssembler::stop(const char* msg) {
- // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
- // We use the 0x54321 value to be able to find it easily when reading memory.
- break_(0x54321);
-}
-
-
void MacroAssembler::MultiPush(RegList regs) {
int16_t NumSaved = 0;
int16_t NumToPush = NumberOfBitsSet(regs);
@@ -417,153 +725,1000 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
}
+void MacroAssembler::Ext(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
+
+ if (mips32r2) {
+ ext_(rt, rs, pos, size);
+ } else {
+ // Move rs to rt and shift it left then right to get the
+ // desired bitfield on the right side and zeroes on the left.
+ sll(rt, rs, 32 - (pos + size));
+ srl(rt, rt, 32 - size);
+ }
+}
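+// Example: Ext(t0, t1, 4, 8) extracts bits 4..11 of t1 into t0, i.e.
+// t0 = (t1 >> 4) & 0xFF, on both the R2 (ext_) and the shift-based paths.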
+
+
+void MacroAssembler::Ins(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
+
+ if (mips32r2) {
+ ins_(rt, rs, pos, size);
+ } else {
+ ASSERT(!rt.is(t8) && !rs.is(t8));
+
+ srl(t8, rt, pos + size);
+ // The left chunk from rt that needs to
+ // be saved is on the right side of t8.
+ sll(at, t8, pos + size);
+ // The 'at' register now contains the left chunk on
+ // the left (proper position) and zeroes.
+ sll(t8, rt, 32 - pos);
+ // t8 now contains the right chunk on the left and zeroes.
+ srl(t8, t8, 32 - pos);
+ // t8 now contains the right chunk on
+ // the right (proper position) and zeroes.
+ or_(rt, at, t8);
+ // rt now contains the left and right chunks from the original rt
+ // in their proper position and zeroes in the middle.
+ sll(t8, rs, 32 - size);
+ // t8 now contains the chunk from rs on the left and zeroes.
+ srl(t8, t8, 32 - size - pos);
+ // t8 now contains the original chunk from rs in
+ // the middle (proper position).
+ or_(rt, rt, t8);
+ // rt now contains the result of the ins instruction in R2 mode.
+ }
+}
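+// Example: Ins(t0, t1, 4, 8) replaces bits 4..11 of t0 with the low 8 bits
+// of t1, leaving the other bits of t0 unchanged, on both code paths above.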
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t4.
+ mfc1(t4, fs);
+ return Cvt_d_uw(fd, t4);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd (and fd + 1).
+ // We do this by converting rs minus the MSB to avoid sign conversion,
+ // then adding 2^31-1 and 1 to the result.
+
+ ASSERT(!fd.is(f20));
+ ASSERT(!rs.is(t9));
+ ASSERT(!rs.is(t8));
+
+ // Save rs's MSB to t8.
+ And(t8, rs, 0x80000000);
+ // Remove rs's MSB.
+ And(t9, rs, 0x7FFFFFFF);
+ // Move t9 to fd.
+ mtc1(t9, fd);
+
+ // Convert fd to a real FP value.
+ cvt_d_w(fd, fd);
+
+ Label conversion_done;
+
+ // If rs's MSB was 0, it's done.
+ // Otherwise we need to add that to the FP register.
+ Branch(&conversion_done, eq, t8, Operand(zero_reg));
+
+ // First load 2^31 - 1 into f20.
+ Or(t9, zero_reg, 0x7FFFFFFF);
+ mtc1(t9, f20);
+
+ // Convert it to FP and add it to fd.
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ // Now add 1.
+ Or(t9, zero_reg, 1);
+ mtc1(t9, f20);
+
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ bind(&conversion_done);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
+ Trunc_uw_d(fs, t4);
+ mtc1(t4, fd);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
+ ASSERT(!fd.is(f22));
+ ASSERT(!rs.is(t8));
+
+ // Load 2^31 into f22.
+ Or(t8, zero_reg, 0x80000000);
+ Cvt_d_uw(f22, t8);
+
+ // Test if f22 > fd.
+ c(OLT, D, fd, f22);
+
+ Label simple_convert;
+ // If fd < 2^31 we can convert it normally.
+ bc1t(&simple_convert);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+
+ sub_d(f22, fd, f22);
+ trunc_w_d(f22, f22);
+ mfc1(rs, f22);
+ or_(rs, rs, t8);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_d(f22, fd);
+ mfc1(rs, f22);
+
+ bind(&done);
+}
+
+
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32-bit signed integer range.
+// This method implementation differs from the ARM version for performance
+// reasons.
+void MacroAssembler::ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label *not_int32) {
+ Label right_exponent, done;
+ // Get exponent word (ENDIAN issues).
+ lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ mov(dest, zero_reg);
+ // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
+ // If the exponent is higher than that then go to not_int32 case. This
+ // catches numbers that don't fit in a signed int32, infinities and NaNs.
+ Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ Subu(scratch2, scratch2, Operand(zero_exponent));
+ // Dest already has a Smi zero.
+ Branch(&done, lt, scratch2, Operand(zero_reg));
+ if (!CpuFeatures::IsSupported(FPU)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ srl(dest, scratch2, HeapNumber::kExponentShift);
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ li(at, Operand(30));
+ subu(dest, at, dest);
+ }
+ bind(&right_exponent);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // MIPS FPU instructions implementing double precision to integer
+ // conversion using round to zero. Since the FP value was qualified
+ // above, the resulting integer should be a legal int32.
+ // The original 'Exponent' word is still in scratch.
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+ trunc_w_d(double_scratch, double_scratch);
+ mfc1(dest, double_scratch);
+ } else {
+ // On entry, dest has final downshift, scratch has original sign/exp/mant.
+ // Save sign bit in top bit of dest.
+ And(scratch2, scratch, Operand(0x80000000));
+ Or(dest, dest, Operand(scratch2));
+ // Put back the implicit 1, just above mantissa field.
+ Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
+
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance. But we want to clear the sign-bit so shift one more bit
+ // left, then shift right one bit.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ sll(scratch, scratch, shift_distance + 1);
+ srl(scratch, scratch, 1);
+
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
+ // The width of the field here is the same as the shift amount above.
+ const int field_width = shift_distance;
+ Ext(scratch2, scratch2, 32-shift_distance, field_width);
+ Ins(scratch, scratch2, 0, field_width);
+ // Move down according to the exponent.
+ srlv(scratch, scratch, dest);
+ // Prepare the negative version of our integer.
+ subu(scratch2, zero_reg, scratch);
+ // Trick to check sign bit (msb) held in dest, count leading zero.
+ // 0 indicates negative, save negative version with conditional move.
+ clz(dest, dest);
+ movz(scratch, scratch2, dest);
+ mov(dest, scratch);
+ }
+ bind(&done);
+}
+
+
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ Ext(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ Subu(scratch, result, HeapNumber::kExponentMask);
+ movz(result, zero_reg, scratch);
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ Subu(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ Branch(&normal_exponent, le, result, Operand(zero_reg));
+ mov(result, zero_reg);
+ Branch(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ mov(input_high, zero_reg);
+ Branch(&high_shift_done);
+ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32bit range.
+ sllv(input_high, input_high, scratch);
+
+ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ li(at, 32);
+ subu(scratch, at, scratch);
+ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ Subu(scratch, zero_reg, scratch);
+ sllv(input_low, input_low, scratch);
+ Branch(&shift_done);
+
+ bind(&pos_shift);
+ srlv(input_low, input_low, scratch);
+
+ bind(&shift_done);
+ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ mov(scratch, sign);
+ result = sign;
+ sign = no_reg;
+ Subu(result, zero_reg, input_high);
+ movz(result, input_high, scratch);
+ bind(&done);
+}
+
+
+void MacroAssembler::EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(FPU);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input));
+
+ Label done;
+ Label manual;
+
+ // Clear cumulative exception flags and save the FCSR.
+ Register scratch2 = input_high;
+ cfc1(scratch2, FCSR);
+ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ trunc_w_d(single_scratch, double_input);
+ mfc1(result, single_scratch);
+ // Retrieve and restore the FCSR.
+ cfc1(scratch, FCSR);
+ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ And(scratch,
+ scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Load the double value and perform a manual truncation.
+ Move(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+ Register src,
+ int num_least_bits) {
+ Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+ Register src,
+ int num_least_bits) {
+ And(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+
 // Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+bool MacroAssembler::UseAbsoluteCodePointers() {
+ if (is_trampoline_emitted()) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+ b(offset);
-// Trashes the at register if no scratch register is provided.
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
r2 = rt.rm_;
- } else if (cond != cc_always) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+  // We use shifted_branch_offset as an argument for the branch instructions
+  // to be sure it is called just before generating the branch instruction.
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
+ b(shifted_branch_offset(L, false));
- default:
- UNREACHABLE();
- }
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ // No code needs to be emitted.
+ return;
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ // No code needs to be emitted.
+ return;
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+  // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
- switch (cond) {
- case cc_always:
- b(shifted_branch_offset(L, false));
- break;
- case eq:
- beq(rs, r2, shifted_branch_offset(L, false));
- break;
- case ne:
- bne(rs, r2, shifted_branch_offset(L, false));
- break;
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, bdslot);
+}
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
- default:
- UNREACHABLE();
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
}
- // Emit a nop in the branch delay slot.
- nop();
}
-// Trashes the at register if no scratch register is provided.
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
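// The slt/sltu below leaves 0 or 1 in scratch; addiu(scratch, scratch, -1)
// turns that into -1 or 0, so bgezal/bltzal can branch-and-link on the sign
// of scratch.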
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+ BranchDelaySlot bdslot) {
+ bal(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -586,7 +1741,7 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
@@ -633,14 +1788,29 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ bal(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -650,161 +1820,370 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
switch (cond) {
case cc_always:
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+
+  // Check that offset could actually hold in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm28;
+ imm28 = jump_address(L);
+ imm28 &= kImm28Mask;
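+  // The j instruction encodes a 26-bit word index, i.e. the low 28 bits of the
+  // target byte address, so the jump stays within the current 256 MB region.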
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jr(target.rm());
+ } else {
+ if (!MustUseReg(target.rmode_)) {
+ j(target.imm32_);
+ } else {
+ li(t9, target);
+ jr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jr(target.rm());
}
- } else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ } else { // Not register target.
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
j(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target).
+ li(t9, target);
if (cond == cc_always) {
- jr(at);
+ jr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
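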
+ return 4 * kInstrSize;
+}
+
+
+int MacroAssembler::CallSize(Register reg) {
+ return 2 * kInstrSize;
+}
+
+
+// Note: To call gcc-compiled C code on mips, you must call through t9.
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jalr(target.rm());
+ } else { // !target.is_reg().
+ if (!MustUseReg(target.rmode_)) {
+ jal(target.imm32_);
+ } else { // MustUseReg(target).
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
+ li(t9, target);
+ jalr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
+// Note: To call gcc-compiled C code on mips, you must call through t9.
void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
- } else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ } else { // !target.is_reg().
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jal(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target)
+ li(t9, target);
if (cond == cc_always) {
- jalr(at);
+ jalr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jalr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
- UNIMPLEMENTED_MIPS();
+
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
-void MacroAssembler::Drop(int count, Condition cond) {
- UNIMPLEMENTED_MIPS();
+void MacroAssembler::Drop(int count,
+ Condition cond,
+ Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ if (count > 0) {
+ addiu(sp, sp, count * kPointerSize);
+ }
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DropAndRet(int drop,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ // This is a workaround to make sure only one branch instruction is
+ // generated. It relies on Drop and Ret not creating branches if
+ // cond == cc_always.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch) {
+ if (scratch.is(no_reg)) {
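+    // No scratch register given: swap in place with the triple-xor trick.
+    // (This requires reg1 and reg2 to be distinct registers.)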
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
}
void MacroAssembler::Call(Label* target) {
- UNIMPLEMENTED_MIPS();
+ BranchAndLink(target);
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+ li(at, Operand(handle));
+ push(at);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
- void MacroAssembler::DebugBreak() {
- UNIMPLEMENTED_MIPS();
- }
-#endif
+void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
+ mov(a0, zero_reg);
+ li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(1);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
@@ -822,7 +2201,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -848,7 +2227,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
li(t0, Operand(StackHandler::ENTRY));
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -864,57 +2243,692 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
void MacroAssembler::PopTryHandler() {
- UNIMPLEMENTED_MIPS();
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(a1);
+ Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ sw(a1, MemOperand(at));
}
+void MacroAssembler::Throw(Register value) {
+ // v0 is expected to hold the exception.
+ Move(v0, value);
-// -----------------------------------------------------------------------------
-// Activation frames
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address,
+ isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a3.bit() | fp.bit());
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ // Set cp to NULL if fp is NULL.
+ Label done;
+ Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&done);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+ // This branch-and-link sequence is needed to get the current PC on mips,
+ // saved to the ra register. Then adjusted for instruction count.
+ bal(&find_ra); // bal exposes branch-delay.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // v0 is expected to hold the exception.
+ Move(v0, value);
+
+ // Drop sp to the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ lw(sp, MemOperand(a3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ lw(a2, MemOperand(sp, kStateOffset));
+ Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ lw(sp, MemOperand(sp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+  // Set the top handler address to the next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(a2);
+ sw(a2, MemOperand(a3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(
+ Isolate::k_external_caught_exception_address, isolate());
+ li(a0, Operand(false, RelocInfo::NONE));
+ li(a2, Operand(external_caught));
+ sw(a0, MemOperand(a2));
+
+ // Set pending exception and v0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+ isolate())));
+ sw(v0, MemOperand(a2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // ra
+
+ // Discard handler state (a2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ MultiPop(a2.bit() | fp.bit()); // a2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ Label cp_null;
+ Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
+ mov(cp, zero_reg); // In the branch delay slot.
+ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ bind(&cp_null);
+
+#ifdef DEBUG
+ // When emitting debug_code, set ra as return address for the jump.
+ // 5 instructions: add: 1, pop: 2, jump: 2.
+ const int kOffsetRaInstructions = 5;
+ Label find_ra;
+
+ if (emit_debug_code()) {
+ // Compute ra for the Jump(t9).
+ const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on MIPS
+    // into the ra register; it is then adjusted by the instruction count.
+ bal(&find_ra); // bal exposes branch-delay slot.
+ nop(); // Branch delay slot nop.
+ bind(&find_ra);
+ addiu(ra, ra, kOffsetRaBytes);
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(t9); // 2 instructions: lw, add sp.
+ Jump(t9); // 2 instructions: jr, nop (in delay slot).
+
+ if (emit_debug_code()) {
+ // Make sure that the expected number of instructions were generated.
+ ASSERT_EQ(kOffsetRaInstructions,
+ InstructionsGeneratedSince(&find_ra));
+ }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9));
+ ASSERT(!scratch2.is(t9));
+ ASSERT(!result.is(t9));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+  // used. MIPS has no ldm, so no such additional checks are needed here.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ Register obj_size_reg = scratch2;
+ li(topaddr, Operand(new_space_allocation_top));
+ li(obj_size_reg, Operand(object_size));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not cause a difference in
+      // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ Addu(scratch2, result, Operand(obj_size_reg));
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ sw(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (emit_debug_code()) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+  // used. MIPS has no ldm, so no such additional checks are needed here.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ li(topaddr, Operand(new_space_allocation_top));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (emit_debug_code()) {
+ // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not cause a difference in
+      // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
-void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
- Label extra_push, end;
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ sll(scratch2, object_size, kPointerSizeLog2);
+ Addu(scratch2, result, scratch2);
+ } else {
+ Addu(scratch2, result, Operand(object_size));
+ }
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
- andi(scratch, sp, 7);
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ And(t9, scratch2, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ }
+ sw(scratch2, MemOperand(topaddr));
- // We check for args and receiver size on the stack, all of them word sized.
- // We add one for sp, that we also want to store on the stack.
- if (((arg_count + 1) % kPointerSizeLog2) == 0) {
- Branch(ne, &extra_push, at, Operand(zero_reg));
- } else { // ((arg_count + 1) % 2) == 1
- Branch(eq, &extra_push, at, Operand(zero_reg));
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
}
+}
- // Save sp on the stack.
- mov(scratch, sp);
- Push(scratch);
- b(&end);
- // Align before saving sp on the stack.
- bind(&extra_push);
- mov(scratch, sp);
- addiu(sp, sp, -8);
- sw(scratch, MemOperand(sp));
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
- // The stack is aligned and sp is stored on the top.
- bind(&end);
+ // Make sure the object has no tag before resetting top.
+ And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+  // Check that the object to un-allocate is below the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ lw(scratch, MemOperand(scratch));
+ Check(less, "Undo allocation of non allocated memory",
+ object, Operand(scratch));
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ sw(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ sll(scratch1, length, 1); // Length in bytes, not chars.
+ addiu(scratch1, scratch1,
+ kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate two-byte string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
}
-void MacroAssembler::ReturnFromAlignedCall() {
- lw(sp, MemOperand(sp));
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string
+ // while observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kCharSize == 1);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* need_gc) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
+ sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+ Register src,
+ RegList temps,
+ int field_count) {
+ ASSERT((temps & dst.bit()) == 0);
+ ASSERT((temps & src.bit()) == 0);
+ // Primitive implementation using only one temporary register.
+
+ Register tmp = no_reg;
+ // Find a temp register in temps list.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if ((temps & (1 << i)) != 0) {
+ tmp.code_ = i;
+ break;
+ }
+ }
+ ASSERT(!tmp.is(no_reg));
+
+ for (int i = 0; i < field_count; i++) {
+ lw(tmp, FieldMemOperand(src, i * kPointerSize));
+ sw(tmp, FieldMemOperand(dst, i * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ bind(&align_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&align_loop_1);
+ And(scratch, src, kPointerSize - 1);
+ Branch(&word_loop, eq, scratch, Operand(zero_reg));
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (emit_debug_code()) {
+ And(scratch, src, kPointerSize - 1);
+ Assert(eq, "Expecting alignment for CopyBytes",
+ scratch, Operand(zero_reg));
+ }
+ Branch(&byte_loop, lt, length, Operand(kPointerSize));
+ lw(scratch, MemOperand(src));
+ Addu(src, src, kPointerSize);
+
+ // TODO(kalmard) check if this can be optimized to use sw in most cases.
+ // Can't use unaligned access - copy byte by byte.
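+  // The sb/srl sequence stores the loaded word lowest byte first, which
+  // reproduces the source byte order on a little-endian target.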
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ Addu(dst, dst, 4);
+
+ Subu(length, length, Operand(kPointerSize));
+ Branch(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&byte_loop_1);
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ bind(&done);
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ li(at, Operand(map));
+ Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type) {
+ Label fail;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, &fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+ bind(&fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(obj, fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(at, index);
+ Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ CpuFeatures::Scope scope(FPU);
+ if (IsMipsSoftFloatABI) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, f0); // Reg f0 is o32 ABI FP return value.
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ Move(f12, dreg);
+ } else {
+ Move(a0, a1, dreg);
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+ DoubleRegister dreg2) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ if (dreg2.is(f12)) {
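+      // dreg2 already occupies f12, so fill f14 first to avoid clobbering it.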
+ ASSERT(!dreg1.is(f14));
+ Move(f14, dreg2);
+ Move(f12, dreg1);
+ } else {
+ Move(f12, dreg1);
+ Move(f14, dreg2);
+ }
+ } else {
+ Move(a0, a1, dreg1);
+ Move(a2, a3, dreg2);
+ }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+ Register reg) {
+ CpuFeatures::Scope scope(FPU);
+ if (!IsMipsSoftFloatABI) {
+ Move(f12, dreg);
+ Move(a2, reg);
+ } else {
+ Move(a2, reg);
+ Move(a0, a1, dreg);
+ }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+ // This macro takes the dst register to make the code more readable
+ // at the call sites. However, the dst register has to be t1 to
+  // follow the calling convention, which requires the call kind to be
+  // in t1.
+ ASSERT(dst.is(t1));
+ if (call_kind == CALL_AS_FUNCTION) {
+ li(dst, Operand(Smi::FromInt(1)));
+ } else {
+ li(dst, Operand(Smi::FromInt(0)));
+ }
}
// -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
bool definitely_matches = false;
Label regular_invoke;
@@ -950,10 +2964,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
} else if (actual.is_immediate()) {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
} else {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
@@ -962,29 +2976,39 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
}
- ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- CallBuiltin(adaptor);
- b(done);
- nop();
+ call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ SetCallKind(t1, call_kind);
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ call_wrapper.AfterCall();
+ jmp(done);
} else {
- JumpToBuiltin(adaptor);
+ SetCallKind(t1, call_kind);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
}
bind(&regular_invoke);
}
}
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
Label done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ SetCallKind(t1, call_kind);
Call(code);
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, call_kind);
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -997,13 +3021,17 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ CallKind call_kind) {
Label done;
- InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ NullCallWrapper(), call_kind);
if (flag == CALL_FUNCTION) {
+ SetCallKind(t1, call_kind);
Call(code, rmode);
} else {
+ SetCallKind(t1, call_kind);
Jump(code, rmode);
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1014,7 +3042,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -1025,72 +3055,125 @@ void MacroAssembler::InvokeFunction(Register function,
lw(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- lw(code_reg,
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
- addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+ sra(expected_reg, expected_reg, kSmiTagSize);
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
+}
+
+
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ CallKind call_kind) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ li(a1, Operand(Handle<JSFunction>(function)));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail) {
+ ASSERT(kNotStringTag != 0);
+
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(scratch, scratch, Operand(kIsNotStringMask));
+ Branch(fail, ne, scratch, Operand(zero_reg));
}
// ---------------------------------------------------------------------------
// Support functions.
- void MacroAssembler::GetObjectType(Register function,
- Register map,
- Register type_reg) {
- lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- }
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- // Call and allocate arguments slots.
- jalr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
+ // Check that the function really is a function. Load map into result reg.
+ GetObjectType(function, result, scratch);
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ Branch(&non_instance, ne, scratch, Operand(zero_reg));
- void MacroAssembler::CallBuiltin(Register target) {
- // Target already holds target address.
- // Call and allocate arguments slots.
- jalr(target);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
+ // Get the prototype or initial map from the function.
+ lw(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+ Branch(miss, eq, result, Operand(t8));
- void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- // Call and allocate arguments slots.
- jr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
+ // If the function does not have an initial map, we're done.
+ Label done;
+ GetObjectType(result, scratch, scratch);
+ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+ // Get the prototype from the initial map.
+ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
- void MacroAssembler::JumpToBuiltin(Register target) {
- // t9 already holds target address.
- // Call and allocate arguments slots.
- jr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
+void MacroAssembler::GetObjectType(Register object,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
// -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
@@ -1099,8 +3182,134 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
}
-void MacroAssembler::StubReturn(int argc) {
- UNIMPLEMENTED_MIPS();
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+ Register r1, const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Object* result;
+ { MaybeObject* maybe_result = stub->TryGetCode();
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ return result;
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+ ExternalReference function, int stack_space) {
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(),
+ next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(),
+ next_address);
+
+ // Allocate HandleScope in callee-save registers.
+ li(s3, Operand(next_address));
+ lw(s0, MemOperand(s3, kNextOffset));
+ lw(s1, MemOperand(s3, kLimitOffset));
+ lw(s2, MemOperand(s3, kLevelOffset));
+ Addu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+
+ // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+ // (4 bytes) will be placed. This is also built into the Simulator.
+ // Set up the pointer to the returned value (a0). It was allocated in
+ // EnterExitFrame.
+ addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+ // Native call returns to the DirectCEntry stub which redirects to the
+ // return address pushed on stack (could have moved after GC).
+ // DirectCEntry stub itself is generated early and never moves.
+ DirectCEntryStub stub;
+ stub.GenerateCall(this, function);
+
+ // As mentioned above, on MIPS a pointer is returned - we need to dereference
+ // it to get the actual return value (which is also a pointer).
+ lw(v0, MemOperand(v0));
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+
+  // If result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+ Label skip;
+ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ Branch(&skip, eq, v0, Operand(zero_reg));
+ lw(a0, MemOperand(v0));
+ bind(&skip);
+ mov(v0, a0);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ sw(s0, MemOperand(s3, kNextOffset));
+ if (emit_debug_code()) {
+ lw(a1, MemOperand(s3, kLevelOffset));
+ Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+ }
+ Subu(s2, s2, Operand(1));
+ sw(s2, MemOperand(s3, kLevelOffset));
+ lw(at, MemOperand(s3, kLimitOffset));
+ Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+ // Check if the function scheduled an exception.
+ bind(&leave_exit_frame);
+ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+ lw(t1, MemOperand(at));
+ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ li(s0, Operand(stack_space));
+ LeaveExitFrame(false, s0);
+ Ret();
+
+ bind(&promote_scheduled_exception);
+ MaybeObject* result = TryTailCallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ bind(&delete_allocated_handles);
+ sw(s1, MemOperand(s3, kLimitOffset));
+ mov(s0, v0);
+ mov(a0, v0);
+ PrepareCallCFunction(1, s1);
+ li(a0, Operand(ExternalReference::isolate_address()));
+ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+ 1);
+ mov(v0, s0);
+ jmp(&leave_exit_frame);
+
+ return result;
}
@@ -1112,7 +3321,138 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::IndexFromHash(Register hash,
+ Register index) {
+  // If the hash field contains an array index, pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+  // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in the index register. kArrayIndexValueMask
+  // has zeros in the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0);
+ Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ sll(index, hash, kSmiTagSize);
+}
+
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+ FPURegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+ JumpIfNotSmi(object, &not_smi);
+ // Remove smi tag and convert to double.
+ sra(scratch1, object, kSmiTagSize);
+ mtc1(scratch1, result);
+ cvt_d_w(result, result);
+ Branch(&done);
+ bind(&not_smi);
+ }
+ // Check for heap number and load double value from it.
+ lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ Register exponent = scratch1;
+ Register mask_reg = scratch2;
+ lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ li(mask_reg, HeapNumber::kExponentMask);
+
+ And(exponent, exponent, mask_reg);
+ Branch(not_number, eq, exponent, Operand(mask_reg));
+ }
+ ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1) {
+ sra(scratch1, smi, kSmiTagSize);
+ mtc1(scratch1, value);
+ cvt_d_w(value, value);
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+
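+  // Overflow on addition occurred iff both operands have the same sign and the
+  // result's sign differs; (dst ^ left) & (dst ^ right) leaves that condition
+  // in the sign bit of overflow_dst.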
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+
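+  // Overflow on subtraction occurred iff the operands have different signs and
+  // the result's sign differs from left's; (dst ^ left) & (left ^ right) leaves
+  // that condition in the sign bit of overflow_dst.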
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ subu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ subu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ subu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1128,101 +3468,294 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// should remove this need and make the runtime routine entry code
// smarter.
li(a0, num_arguments);
- LoadExternalReference(a1, ExternalReference(f));
+ li(a1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ li(a0, Operand(function->nargs));
+ li(a1, Operand(ExternalReference(function, isolate())));
+ CEntryStub stub(1);
+ stub.SaveDoubles();
+ CallStub(&stub);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ li(a0, Operand(num_arguments));
+ li(a1, Operand(ext));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
- UNIMPLEMENTED_MIPS();
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, Operand(num_arguments));
+ JumpToExternalReference(ext);
+}
+
+
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, num_arguments);
+ return TryJumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- UNIMPLEMENTED_MIPS();
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- UNIMPLEMENTED_MIPS();
- return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+ const ExternalReference& builtin) {
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ return TryTailCallStub(&stub);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags) {
- UNIMPLEMENTED_MIPS();
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
+ GetBuiltinEntry(t9, id);
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(t9));
+ SetCallKind(t1, CALL_AS_METHOD);
+ Call(t9);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, CALL_AS_METHOD);
+ Jump(t9);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ lw(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!target.is(a1));
+ GetBuiltinFunction(a1, id);
+ // Load the code entry point from the builtins object.
+ lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch1, Operand(value));
+ li(scratch2, Operand(ExternalReference(counter)));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Addu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Subu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
// -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ if (emit_debug_code())
+ Check(cc, msg, rs, rt);
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (emit_debug_code()) {
+ LoadRoot(at, index);
+ Check(eq, "Register did not match expected root", reg, Operand(at));
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (emit_debug_code()) {
+ ASSERT(!elements.is(at));
+ Label ok;
+ push(elements);
+ lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ pop(elements);
+ }
}
void MacroAssembler::Check(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ Label L;
+ Branch(&L, cc, rs, rt);
+ Abort(msg);
+ // Will not return here.
+ bind(&L);
}
void MacroAssembler::Abort(const char* msg) {
- UNIMPLEMENTED_MIPS();
+ Label abort_start;
+ bind(&abort_start);
+ // We want to pass the msg string like a smi to avoid GC
+ // problems, however msg is not guaranteed to be aligned
+ // properly. Instead, we pass an aligned pointer that is
+ // a proper v8 smi, but also pass the alignment difference
+ // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
+
+ li(a0, Operand(p0));
+ push(a0);
+ li(a0, Operand(Smi::FromInt(p1 - p0)));
+ push(a0);
+ CallRuntime(Runtime::kAbort, 2);
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 14;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
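
A rough sketch (editorial, not from the patch) of the pointer encoding Abort uses above, assuming the 32-bit smi layout where kSmiTag == 0 and kSmiTagMask == 1: the message pointer with its low bit cleared already passes the IsSmi() check, and the cleared bit travels separately (as a real smi in the generated code) so the runtime can reassemble the original pointer.

#include <cstdint>

struct AbortMessageEncoding {
  intptr_t smi_like_ptr;  // msg with the tag bit cleared; looks like a smi.
  intptr_t delta;         // 0 or 1; pushed as Smi::FromInt(delta) by Abort.
};

static AbortMessageEncoding EncodeAbortMessage(const char* msg) {
  const intptr_t kSmiTagMask = 1;  // Assumption: 32-bit smi encoding.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = p1 & ~kSmiTagMask;      // Passes the smi tag check as-is.
  return AbortMessageEncoding{p0, p1 - p0};
}

static const char* DecodeAbortMessage(const AbortMessageEncoding& e) {
  return reinterpret_cast<const char*>(e.smi_like_ptr + e.delta);
}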
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ for (int i = 1; i < context_chain_length; i++) {
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ }
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ Move(dst, cp);
+ }
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function, FieldMemOperand(function,
+ GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ lw(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (emit_debug_code()) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+ Branch(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
- li(t0, Operand(Smi::FromInt(type)));
- li(t1, Operand(CodeObject()));
+ li(t8, Operand(Smi::FromInt(type)));
+ li(t9, Operand(CodeObject()));
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t0, MemOperand(sp, 1 * kPointerSize));
- sw(t1, MemOperand(sp, 0 * kPointerSize));
+ sw(t8, MemOperand(sp, 1 * kPointerSize));
+ sw(t9, MemOperand(sp, 0 * kPointerSize));
addiu(fp, sp, 3 * kPointerSize);
}
@@ -1235,92 +3768,474 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
- Register hold_argv,
- Register hold_function) {
- // Compute the argv pointer and keep it in a callee-saved register.
- // a0 is argc.
- sll(t0, a0, kPointerSizeLog2);
- add(hold_argv, sp, t0);
- addi(hold_argv, hold_argv, -kPointerSize);
-
- // Compute callee's stack pointer before making changes and save it as
- // t1 register so that it is restored as sp register on exit, thereby
- // popping the args.
- // t1 = sp + kPointerSize * #args
- add(t1, sp, t0);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+ int stack_space) {
+ // Setup the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
- // Align the stack at this point.
- AlignStack(0);
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1 (==kSPOffset)] - sp of the called function
+ // [fp - 2 (==kCodeOffset)] - CodeObject
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
// Save registers.
- addiu(sp, sp, -12);
- sw(t1, MemOperand(sp, 8));
- sw(ra, MemOperand(sp, 4));
- sw(fp, MemOperand(sp, 0));
- mov(fp, sp); // Setup new frame pointer.
-
- // Push debug marker.
- if (mode == ExitFrame::MODE_DEBUG) {
- Push(zero_reg);
- } else {
- li(t0, Operand(CodeObject()));
- Push(t0);
+ addiu(sp, sp, -4 * kPointerSize);
+ sw(ra, MemOperand(sp, 3 * kPointerSize));
+ sw(fp, MemOperand(sp, 2 * kPointerSize));
+ addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer.
+
+ if (emit_debug_code()) {
+ sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
+ li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
// Save the frame pointer and the context in top.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(fp, MemOperand(t0));
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- sw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(fp, MemOperand(t8));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ sw(cp, MemOperand(t8));
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+ // The stack must be aligned to 0 modulo 8 for stores with sdc1.
+ ASSERT(kDoubleSize == frame_alignment);
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+ int space = FPURegister::kNumRegisters * kDoubleSize;
+ Subu(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ FPURegister reg = FPURegister::from_code(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve space for the return address, the stack space and an optional slot
+ // (used by the DirectCEntryStub to hold the return value if a struct is
+ // returned) and align the frame preparing for calling the runtime function.
+ ASSERT(stack_space >= 0);
+ Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
- // Setup argc and the builtin function in callee-saved registers.
- mov(hold_argc, a0);
- mov(hold_function, a1);
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ addiu(at, sp, kPointerSize);
+ sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+ Register argument_count) {
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ FPURegister reg = FPURegister::from_code(i);
+ ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
+ }
+ }
+
// Clear top frame.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(zero_reg, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- lw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ lw(cp, MemOperand(t8));
#ifdef DEBUG
- sw(a3, MemOperand(t0));
+ sw(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
- lw(fp, MemOperand(sp, 0));
- lw(ra, MemOperand(sp, 4));
- lw(sp, MemOperand(sp, 8));
- jr(ra);
- nop(); // Branch delay slot nop.
-}
-
-
-void MacroAssembler::AlignStack(int offset) {
- // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
- // and an offset of 1 aligns to 4 modulo 8 bytes.
- int activation_frame_alignment = OS::ActivationFrameAlignment();
- if (activation_frame_alignment != kPointerSize) {
- // This code needs to be made more general if this assert doesn't hold.
- ASSERT(activation_frame_alignment == 2 * kPointerSize);
- if (offset == 0) {
- andi(t0, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t0, zero_reg);
- } else {
- andi(t0, sp, activation_frame_alignment - 1);
- addiu(t0, t0, -4);
- Push(zero_reg, eq, t0, zero_reg);
+ lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+ addiu(sp, sp, 8);
+ if (argument_count.is_valid()) {
+ sll(t8, argument_count, kPointerSizeLog2);
+ addu(sp, sp, t8);
+ }
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ sll(scratch1, length, kSmiTagSize);
+ LoadRoot(scratch2, map_index);
+ sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ li(scratch1, Operand(String::kEmptyHashField));
+ sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if defined(V8_HOST_ARCH_MIPS)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one Mips
+ // platform for another Mips platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_MIPS)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // defined(V8_HOST_ARCH_MIPS)
+}
+
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (emit_debug_code()) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ ASSERT(IsPowerOf2(frame_alignment));
+ andi(at, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort, re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
+ }
}
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero) {
+ Subu(scratch, reg, Operand(1));
+ Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+ scratch, Operand(zero_reg));
+ and_(at, scratch, reg); // In the delay slot.
+ Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
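
The scalar check behind the two branches above, written out as standalone C++ (a sketch, assuming 32-bit signed values): a positive value is a power of two exactly when clearing its lowest set bit with v & (v - 1) leaves zero.

static bool IsPowerOfTwoAndNonZero(int32_t v) {
  if (v <= 0) return false;   // The 'lt' branch: zero and negatives fail.
  return (v & (v - 1)) == 0;  // The and_/ne pair: lowest set bit cleared.
}
// Example: true for 8, false for 12, false for 0.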
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+ Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+ or_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+ Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+ // Both Smi tags must be 1 (not Smi).
+ and_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_either_smi, eq, at, Operand(zero_reg));
+}
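
A sketch of the tag-bit tricks used by the two helpers above, assuming the 32-bit smi encoding (kSmiTag == 0, kSmiTagMask == 1, tag in bit 0): OR-ing the registers lets one test detect a set tag bit in either value, while AND-ing detects a clear tag bit in either value.

#include <cstdint>

static bool BothAreSmis(uint32_t reg1, uint32_t reg2) {
  return ((reg1 | reg2) & 1u) == 0;   // Neither value has its tag bit set.
}

static bool EitherIsSmi(uint32_t reg1, uint32_t reg2) {
  return ((reg1 & reg2) & 1u) == 0;   // At least one tag bit is clear.
}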
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Assert(eq, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotString(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(at));
+ LoadRoot(at, root_value_index);
+ Assert(eq, message, src, Operand(at));
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ And(scratch1, first, Operand(second));
+ And(scratch1, scratch1, Operand(kSmiTagMask));
+ Branch(failure, eq, scratch1, Operand(zero_reg));
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+ andi(scratch2, second, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch, type, Operand(kFlatAsciiStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
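
A sketch of the instance-type test the two helpers above perform (editorial; the mask and tag names mirror the surrounding code, and the exact bit layout is assumed from this V8 version): a flat ASCII string is recognized with a single mask-and-compare against ASCII_STRING_TYPE.

// 'type' is the byte read from Map::kInstanceTypeOffset.
static bool IsSequentialAsciiInstanceType(int type,
                                          int is_not_string_mask,
                                          int string_encoding_mask,
                                          int string_representation_mask,
                                          int ascii_string_type) {
  int flat_ascii_mask =
      is_not_string_mask | string_encoding_mask | string_representation_mask;
  return (type & flat_ascii_mask) == ascii_string_type;
}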
+
+
+static const int kRegisterPassedArguments = 4;
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+ // Up to four simple arguments are passed in registers a0..a3.
+ // Those four arguments must have reserved argument slots on the stack for
+ // mips, even though those argument slots are not normally used.
+ // Remaining arguments are pushed on the stack, above (higher address than)
+ // the argument slots.
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for num_arguments - 4 words
+ // and the original value of sp.
+ mov(scratch, sp);
+ Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment));
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
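
A sketch of the word count reserved by PrepareCallCFunction above, under the o32 assumption that StandardFrameConstants::kCArgsSlotsSize is four pointer-sized argument slots: only arguments beyond the first four need real stack words, but the four register-argument slots are always reserved (the extra word for the saved sp in the aligned case is added separately).

static int StackPassedWords(int num_arguments) {
  const int kRegisterPassedArguments = 4;
  const int kCArgSlotWords = 4;  // Assumption: kCArgsSlotsSize / kPointerSize.
  int spilled = (num_arguments <= kRegisterPassedArguments)
                    ? 0
                    : num_arguments - kRegisterPassedArguments;
  return spilled + kCArgSlotWords;
}
// Example: StackPassedWords(6) == 6 (four reserved slots plus two spilled words).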
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunctionHelper(no_reg, function, t8, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+ // The argument slots are presumed to have been set up by
+ // PrepareCallCFunction. The C function must be called via t9, per the MIPS ABI.
+
+#if defined(V8_HOST_ARCH_MIPS)
+ if (emit_debug_code()) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ And(at, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop("Unexpected alignment in CallCFunction");
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_MIPS
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+
+ if (function.is(no_reg)) {
+ function = t9;
+ li(function, Operand(function_reference));
+ } else if (!function.is(t9)) {
+ mov(t9, function);
+ function = t9;
+ }
+
+ Call(function);
+
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+
+ if (OS::ActivationFrameAlignment() > kPointerSize) {
+ lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+ Register descriptors) {
+ lw(descriptors,
+ FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+ Label not_smi;
+ JumpIfNotSmi(descriptors, &not_smi);
+ li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ bind(&not_smi);
+}
+
+
+CodePatcher::CodePatcher(byte* address, int instructions)
+ : address_(address),
+ instructions_(instructions),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+ // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+ masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ ASSERT(Assembler::IsBranch(instr));
+ uint32_t opcode = Assembler::GetOpcodeField(instr);
+ // Currently only the 'eq' and 'ne' cond values are supported and the simple
+ // branch instructions (with opcode being the branch type).
+ // There are some special cases (see Assembler::IsBranch()) so extending this
+ // would be tricky.
+ ASSERT(opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL);
+ opcode = (cond == eq) ? BEQ : BNE;
+ instr = (instr & ~kOpcodeMask) | opcode;
+ masm_.emit(instr);
+}
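
A sketch of the instruction patching ChangeBranchCondition does above, assuming the standard MIPS I encoding where the opcode occupies bits 31..26 and BEQ/BNE are opcodes 0x04/0x05: only the opcode field is rewritten, so rs, rt and the branch offset are preserved.

#include <cstdint>

static uint32_t PatchBranchCondition(uint32_t instr, bool make_eq) {
  const uint32_t kOpcodeMask = 0x3Fu << 26;  // Opcode field, bits 31..26.
  const uint32_t kBeq = 0x04u << 26;         // beq rs, rt, offset.
  const uint32_t kBne = 0x05u << 26;         // bne rs, rt, offset.
  return (instr & ~kOpcodeMask) | (make_eq ? kBeq : kBne);
}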
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 0f0365b7c..985ef0c83 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,7 @@
#include "assembler.h"
#include "mips/assembler-mips.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -37,76 +38,205 @@ namespace internal {
// Forward declaration.
class JumpTarget;
-// Register at is used for instruction generation. So it is not safe to use it
-// unless we know exactly what we do.
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function call
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update gp register for position-independent-code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
// Registers aliases
// cp is assumed to be a callee saved register.
-const Register cp = s7; // JavaScript context pointer
-const Register fp = s8_fp; // Alias fp
+const Register roots = s6; // Roots array pointer.
+const Register cp = s7; // JavaScript context pointer.
+const Register fp = s8_fp; // Alias for fp.
+// Registers used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
+
+
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+ // Return the pointer to the allocated already tagged as a heap object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+ // Specify that the requested size of the space to allocate is specified in
+ // words instead of bytes.
+ SIZE_IN_WORDS = 1 << 2
+};
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+ // No special flags.
+ NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+ // Object is known to be a non smi.
+ OBJECT_NOT_SMI = 1 << 0,
+ // Don't load NaNs or infinities, branch to the non number case instead.
+ AVOID_NANS_AND_INFINITIES = 1 << 1
+};
-enum InvokeJSFlags {
- CALL_JS,
- JUMP_JS
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+ USE_DELAY_SLOT,
+ PROTECT
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller to never invoke such function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+// Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+// Prototypes.
+
+// Prototypes for functions with no target (eg Ret()).
+#define DECLARE_NOTARGET_PROTOTYPE(Name) \
+ void Name(BranchDelaySlot bd = PROTECT); \
+ void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
+ Name(COND_ARGS, bd); \
+ }
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Ret(Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
- void Branch(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
- // conditionnal branch and link
- void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
- void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
+// Prototypes for functions with a target.
+
+// Cases when relocation may be needed.
+#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode) { \
+ Name(target, rmode, bd); \
+ } \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS) { \
+ Name(target, rmode, COND_ARGS, bd); \
+ }
+
+// Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
+ }
+
+// Target prototypes.
+
+#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Register) \
+ DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
+ DECLARE_RELOC_PROTOTYPE(Name, byte*) \
+ DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+ DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+
+DECLARE_JUMP_CALL_PROTOTYPES(Jump)
+DECLARE_JUMP_CALL_PROTOTYPES(Call)
+
+DECLARE_BRANCH_PROTOTYPES(Branch)
+DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+
+DECLARE_NOTARGET_PROTOTYPE(Ret)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef DECLARE_NOTARGET_PROTOTYPE
+#undef DECLARE_NORELOC_PROTOTYPE
+#undef DECLARE_RELOC_PROTOTYPE
+#undef DECLARE_JUMP_CALL_PROTOTYPES
+#undef DECLARE_BRANCH_PROTOTYPES
+
+ void CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ Condition cond = al,
+ Register r1 = zero_reg,
+ const Operand& r2 = Operand(zero_reg));
+
+ int CallSize(Register reg);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = cc_always);
+ void Drop(int count,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void DropAndRet(int drop = 0,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Call(Label* target);
+ inline void Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
+ }
+
+ inline void Move(FPURegister dst, FPURegister src) {
+ if (!dst.is(src)) {
+ mov_d(dst, src);
+ }
+ }
+
+ inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+ mfc1(dst_low, src);
+ mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ }
+
+ inline void Move(FPURegister dst, Register src_low, Register src_high) {
+ mtc1(src_low, dst);
+ mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+ }
+
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
// CodeGenerator::ProcessDeferred().
// Currently the branch delay slot is filled by the MacroAssembler.
// Use rather b(Label) for code generation.
void jmp(Label* L) {
- Branch(cc_always, L);
+ Branch(L);
}
// Load an object from the root table.
@@ -116,19 +246,164 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- // Load an external reference.
- void LoadExternalReference(Register reg, ExternalReference ext) {
- li(reg, Operand(ext));
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+ void StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // eq for new space, ne otherwise.
+ Label* branch);
+
+
+ // For the page containing |object| mark the region covering [address]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object,
+ Register address,
+ Register scratch);
+
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the 'at' register. RecordWrite updates the
+ // write barrier even when storing smis.
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
+
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+ // as well as the ip register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
+
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support.
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+ inline void MarkCode(NopMarkerTypes type) {
+ nop(type);
}
- // Sets the remembered set bit for [address+offset].
- void RecordWrite(Register object, Register offset, Register scratch);
+ // Check if the given instruction is a 'type' marker.
+ // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+ // nop(type)). These instructions are generated to mark special locations in
+ // the code, like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+ static inline int GetCodeMarker(Instr instr) {
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+ uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+ uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+ // Return <n> if we have a sll zero_reg, zero_reg, n
+ // else return -1.
+ bool sllzz = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ int type =
+ (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+ ASSERT((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
+
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support.
+
+ // Allocate an object in new space. The object_size is specified
+ // either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the new space is exhausted control continues at the
+ // gc_required label. The allocated object is returned in result. If
+ // the flag tag_allocated_object is true the result is tagged as
+ // a heap object. All registers are clobbered also when control
+ // continues at the gc_required label.
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+ void AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
- // Instruction macros
+ // Instruction macros.
-#define DEFINE_INSTRUCTION(instr) \
+#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
void instr(Register rd, Register rs, Register rt) { \
instr(rd, rs, Operand(rt)); \
@@ -137,7 +412,7 @@ class MacroAssembler: public Assembler {
instr(rs, rt, Operand(j)); \
}
-#define DEFINE_INSTRUCTION2(instr) \
+#define DEFINE_INSTRUCTION2(instr) \
void instr(Register rs, const Operand& rt); \
void instr(Register rs, Register rt) { \
instr(rs, Operand(rt)); \
@@ -146,8 +421,8 @@ class MacroAssembler: public Assembler {
instr(rs, Operand(j)); \
}
- DEFINE_INSTRUCTION(Add);
DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
@@ -158,46 +433,75 @@ class MacroAssembler: public Assembler {
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
+ // MIPS32 R2 instruction macro.
+ DEFINE_INSTRUCTION(Ror);
+
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
- //------------Pseudo-instructions-------------
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
- // Move the logical ones complement of source to dest.
- void movn(Register rd, Register rt);
-
- // load int32 in the rd register
+ // Load int32 in the rd register.
void li(Register rd, Operand j, bool gen2instr = false);
inline void li(Register rd, int32_t j, bool gen2instr = false) {
li(rd, Operand(j), gen2instr);
}
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg);
-
+ inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
+ li(dst, Operand(value), gen2instr);
+ }
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses
+ // saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
- void Push(Register src) {
+
+ // Lower case push() for compatibility with arch-independent code.
+ void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
- inline void push(Register src) { Push(src); }
+
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Subu(sp, sp, Operand(2 * kPointerSize));
+ sw(src1, MemOperand(sp, 1 * kPointerSize));
+ sw(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Subu(sp, sp, Operand(3 * kPointerSize));
+ sw(src1, MemOperand(sp, 2 * kPointerSize));
+ sw(src2, MemOperand(sp, 1 * kPointerSize));
+ sw(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Subu(sp, sp, Operand(4 * kPointerSize));
+ sw(src1, MemOperand(sp, 3 * kPointerSize));
+ sw(src2, MemOperand(sp, 2 * kPointerSize));
+ sw(src3, MemOperand(sp, 1 * kPointerSize));
+ sw(src4, MemOperand(sp, 0 * kPointerSize));
+ }
void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditionnal execution we use a Branch.
- Branch(cond, 3, tst1, Operand(tst2));
- Addu(sp, sp, Operand(-kPointerSize));
+ // Since we don't have conditional execution we use a Branch.
+ Branch(3, cond, tst1, Operand(tst2));
+ Subu(sp, sp, Operand(kPointerSize));
sw(src, MemOperand(sp, 0));
}
@@ -205,137 +509,365 @@ class MacroAssembler: public Assembler {
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
- void Pop(Register dst) {
+
+ // Lower case pop() for compatibility with arch-independent code.
+ void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
- void Pop() {
- Add(sp, sp, Operand(kPointerSize));
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ ASSERT(!src1.is(src2));
+ lw(src2, MemOperand(sp, 0 * kPointerSize));
+ lw(src1, MemOperand(sp, 1 * kPointerSize));
+ Addu(sp, sp, 2 * kPointerSize);
}
+ void Pop(uint32_t count = 1) {
+ Addu(sp, sp, Operand(count * kPointerSize));
+ }
- // ---------------------------------------------------------------------------
- // Activation frames
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters();
+ void PopSafepointRegisters();
+ void PushSafepointRegistersAndDoubles();
+ void PopSafepointRegistersAndDoubles();
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+ // MIPS32 R2 instruction macro.
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs);
+ void Trunc_uw_d(FPURegister fd, Register rs);
+
+ // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+ // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
+ // to the not_int32 label. If FPU is available, double_scratch is used but not
+ // scratch2.
+ void ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label *not_int32);
+
+ // Helper for EmitECMATruncate.
+ // This will truncate a floating-point value outside of the signed 32-bit
+ // integer range to a 32-bit signed integer.
+ // Expects the double value loaded into input_high and input_low.
+ // Exits with the answer in 'result'.
+ // Note that this code does not work for values in the 32-bit range!
+ void EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch);
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer and all other registers clobbered.
+ void EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register scratch2,
+ Register scratch3);
+
+ // -------------------------------------------------------------------------
+ // Activation frames.
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register a0 and
- // the builtin function to call in register a1.
- // On output hold_argc, hold_function, and hold_argv are setup.
- void EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
- Register hold_argv,
- Register hold_function);
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Leave the current exit frame. Expects the return value in v0.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles,
+ int stack_space = 0);
- // Align the stack by optionally pushing a Smi zero.
- void AlignStack(int offset);
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count);
- void SetupAlignedCall(Register scratch, int arg_count = 0);
- void ReturnFromAlignedCall();
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
- // ---------------------------------------------------------------------------
- // JavaScript invokes
+ void LoadContext(Register dst, int context_chain_length);
+
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same, function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Setup call kind marking in t1. The method takes t1 as an
+ // explicit first parameter to make the code more readable at the
+ // call sites.
+ void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag);
+ InvokeFlag flag,
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
+
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ CallKind call_kind);
+
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail);
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void CopyRegistersFromMemoryToStack(Register base, RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
+ // -------------------------------------------------------------------------
+ // Debugger Support.
+
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
- // Exception handling
+ // -------------------------------------------------------------------------
+ // Exception handling.
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
+ // Clobber t0, t1, t2.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
+ // Passes thrown value (in v0) to the handler of top of the try handler chain.
+ void Throw(Register value);
- // ---------------------------------------------------------------------------
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
+ // Copies a fixed number of fields of heap objects from src to dst.
+ void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
// Support functions.
+ // Try to get function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss);
+
void GetObjectType(Register function,
Register map,
Register type_reg);
- inline void BranchOnSmi(Register value, Label* smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(eq, smi_label, scratch, Operand(zero_reg));
- }
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if the map of an object is equal to a specified map (either
+ // given directly or as an index into the root list) and branch to
+ // label if not. Skip the smi check if not required (object is known
+ // to be a heap object).
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ SmiCheckType smi_check_type);
+
+ // Check if the map of an object is equal to a specified map and branch to a
+ // specified target if equal. Skip the smi check if not required (object is
+ // known to be a heap object)
+ void DispatchMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Handle<Code> success,
+ SmiCheckType smi_check_type);
+ // Generates code for reporting that an illegal operation has
+ // occurred.
+ void IllegalOperation(int num_arguments);
- inline void BranchOnNotSmi(Register value, Label* not_smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(ne, not_smi_label, scratch, Operand(zero_reg));
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
+
+ // Get the number of least significant bits from a register.
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+ // Load the value of a number object into a FPU double register. If the
+ // object is not a number a jump to the label not_number is performed
+ // and the FPU double register is unchanged.
+ void ObjectToDoubleFPURegister(
+ Register object,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+ // Load the value of a smi object into a FPU double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1);
+
+ // -------------------------------------------------------------------------
+ // Overflow handling functions.
+ // Usage: first call the appropriate arithmetic function, then call one of the
+ // jump functions with the overflow_dst register as the second parameter.
+
+ void AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void BranchOnOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
}
- void CallBuiltin(ExternalReference builtin_entry);
- void CallBuiltin(Register target);
- void JumpToBuiltin(ExternalReference builtin_entry);
- void JumpToBuiltin(Register target);
+ void BranchOnNoOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ }
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
+ void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(lt, overflow_check, Operand(zero_reg), bd);
+ }
+ void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(ge, overflow_check, Operand(zero_reg), bd);
+ }
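+
+  // Illustrative usage sketch: overflow_dst is negative after an overflow
+  // (see BranchOnOverflow above), so a caller might write, assuming a0 and
+  // a1 hold the operands and t3 is free:
+  //   AdduAndCheckForOverflow(v0, a0, a1, t3);
+  //   BranchOnOverflow(&overflow_label, t3);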
- // ---------------------------------------------------------------------------
- // Runtime calls
+ // -------------------------------------------------------------------------
+ // Runtime calls.
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void CallJSExitStub(CodeStub* stub);
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
+ // Call a code stub and return the code object called. Try to generate
+ // the code if necessary. Do not perform a GC but instead return a retry
+ // after GC failure.
+ MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC but instead return
+ // a retry after GC failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 =
+ Operand(zero_reg));
+
+ void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
@@ -343,40 +875,85 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ // Tail call of a runtime routine (jump). Try to generate the code if
+ // necessary. Do not perform a GC but instead return a retry after GC
+ // failure.
+ MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+ const ExternalReference& ext, int num_arguments, int result_size);
+
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
+ // Before calling a C-function from generated code, align arguments on stack
+ // and add space for the four mips argument slots.
+  // After aligning the frame, non-register arguments must be stored on the
+  // stack, after the argument slots, using the CFunctionArgumentOperand()
+  // helper.
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+  // Arguments 1-4 are placed in registers a0 through a3, respectively.
+  // Arguments 5..n are stored on the stack using, for example:
+  //  sw(t0, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
+ void GetCFunctionDoubleResult(const DoubleRegister dst);
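+
+  // Illustrative sketch of the calling sequence described above; the external
+  // reference and the register holding argument 5 are hypothetical:
+  //   PrepareCallCFunction(5, t8);              // t8 is used as scratch.
+  //   sw(t0, CFunctionArgumentOperand(5));      // Arguments 1..4 go in a0..a3.
+  //   CallCFunction(ExternalReference(...), 5); // Frees the argument space.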
+
+  // There are two ways of passing double arguments on MIPS, depending on
+  // whether the soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different argument combinations
+  // used when calling C functions from generated code.
+ void SetCallCDoubleArguments(DoubleRegister dreg);
+ void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+ void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+
+  // Calls an API function. Allocates a HandleScope, extracts the returned
+  // value from the handle, and propagates exceptions. Restores the context.
+ MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+ int stack_space);
+
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
+ MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
- // setup the function in r1.
+ // setup the function in a1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
struct Unresolved {
int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
+ uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
- List<Unresolved>* unresolved() { return &unresolved_; }
- Handle<Object> CodeObject() { return code_object_; }
-
-
- // ---------------------------------------------------------------------------
- // Stack limit support
-
- void StackLimitCheck(Label* on_stack_limit_hit);
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
void SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
@@ -386,12 +963,14 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2);
- // ---------------------------------------------------------------------------
- // Debugging
+ // -------------------------------------------------------------------------
+ // Debugging.
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg, Register rs, Operand rt);
@@ -405,17 +984,157 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- private:
- List<Unresolved> unresolved_;
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
+ // ---------------------------------------------------------------------------
+ // Number utilities.
+
+ // Check whether the value of reg is a power of two and not zero. If not
+ // control continues at the label not_power_of_two. If reg is a power of two
+ // the register scratch contains the value of (reg - 1) when control falls
+ // through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+  // Try to convert an int32 to a smi. If the value is too large, preserve
+  // the original value and jump to not_a_smi. Destroys scratch and
+  // sets flags.
+  // This is only used by Crankshaft at the moment, so it is unimplemented
+  // on MIPS.
+ void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void SmiTag(Register reg) {
+ Addu(reg, reg, reg);
+ }
+ void SmiTag(Register dst, Register src) {
+ Addu(dst, src, src);
+ }
+
+ void SmiUntag(Register reg) {
+ sra(reg, reg, kSmiTagSize);
+ }
+
+ void SmiUntag(Register dst, Register src) {
+ sra(dst, src, kSmiTagSize);
+ }
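+
+  // Note: with kSmiTag == 0 and kSmiTagSize == 1, a smi is the value shifted
+  // left by one bit, so tagging is an addition of the register to itself and
+  // untagging is an arithmetic shift right by one.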
+
+  // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+ }
+
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+ }
+
+ // Jump if either of the registers contain a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contain a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+ void AbortIfNotSmi(Register object);
+
+  // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities.
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
+ // -------------------------------------------------------------------------
+ // String utilities.
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Check that they are non-smis.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ void LoadInstanceDescriptors(Register map, Register descriptors);
+
+ private:
+ void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments);
+
+ void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jr(Label* L, BranchDelaySlot bdslot);
+ void Jalr(Label* L, BranchDelaySlot bdslot);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
+ void Call(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -423,28 +1142,102 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
- // EnterFrame clobbers t0 and t1.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+ bool UseAbsoluteCodePointers();
+
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code, e.g. for
+// debugging and other types of instrumentation. When using the code patcher,
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated, it causes
+// an assertion to fail.
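+//
+// A minimal usage sketch (the address and the instruction count below are
+// hypothetical):
+//   CodePatcher patcher(break_address, 1);
+//   patcher.masm()->nop();  // Overwrites exactly one instruction.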
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int instructions);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr instr);
+
+ // Emit an address directly.
+ void Emit(Address addr);
+
+  // Change the condition part of an instruction, leaving the rest of the
+  // current instruction unchanged.
+ void ChangeBranchCondition(Condition cond);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int instructions_; // Number of instructions of the expected patch size.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.
+static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > StandardFrameConstants::kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset =
+ (index - 5) * kPointerSize + StandardFrameConstants::kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
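+// For example, with the o32 ABI values (kPointerSize == 4 and
+// kCArgsSlotsSize == 4 * kPointerSize == 16), argument 5 lands at sp + 16 and
+// argument 6 at sp + 20, just above the four reserved argument slots.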
+
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -458,4 +1251,3 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
} } // namespace v8::internal
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
new file mode 100644
index 000000000..cfc8f651c
--- /dev/null
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -0,0 +1,1251 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t1 : Pointer to current code object (Code*) including heap object tag.
+ * - t2 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - t3 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t4 : points to tip of backtrack stack
+ * - t5 : Unused.
+ * - t6 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[56] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[52] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
+ * - fp[48] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[44] secondary link/return address used by native call.
+ * --- sp when called ---
+ *  - fp[40]  return address (ra).
+ *  - fp[36]  old frame pointer (fp).
+ * - fp[0..32] backup of registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
+ * - fp[-12] start index (character index of start).
+ * - fp[-16] void* input_string (location of a handle containing the string).
+ * - fp[-20] Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-24] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ *  - fp[-28]  register 0         (Only positions must be stored in the first
+ *  -          register 1          num_saved_registers_ registers)
+ *  - ...
+ *  -          register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * Address secondary_return_address, // Only used by native call.
+ * int* capture_output_array,
+ * byte* stack_area_base,
+ * bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
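+
+// Note: the fp-relative slots listed above are accessed below through symbolic
+// offsets (kInputEnd, kInputStart, kStartIndex, kRegisterZero, kStackHighEnd,
+// kDirectCall, ...) declared in the accompanying regexp-macro-assembler-mips.h,
+// e.g. __ lw(a0, MemOperand(frame_pointer(), kInputStart)).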
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+ Mode mode,
+ int registers_to_save)
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(v0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Addu(current_input_offset(),
+ current_input_offset(), Operand(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ lw(a0, register_location(reg));
+ __ Addu(a0, a0, Operand(by));
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(a0);
+ __ Addu(a0, a0, code_pointer());
+ __ Jump(Operand(a0));
+}
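+
+
+// Note: PushBacktrack() pushes backtrack targets as offsets relative to the
+// code object (see the Code::kHeaderSize adjustment there), which is why
+// Backtrack() adds code_pointer() back before jumping; the stored offsets
+// remain valid even if the code object is moved by the garbage collector.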
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+
+ // If we did, are we still at the start of the input?
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+ // If we did, are we still at the start of the input?
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ if (on_failure == NULL) {
+ // Instead of inlining a backtrack for each test, (re)use the global
+ // backtrack target.
+ on_failure = &backtrack_label_;
+ }
+
+ if (check_end_of_string) {
+ // Is last character of required match inside string.
+ CheckPosition(cp_offset + str.length() - 1, on_failure);
+ }
+
+ __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ if (cp_offset != 0) {
+ int byte_offset = cp_offset * char_size();
+ __ Addu(a0, a0, Operand(byte_offset));
+ }
+
+ // a0 : Address of characters to match against str.
+ int stored_high_byte = 0;
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ lbu(a1, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ ASSERT(str[i] <= String::kMaxAsciiCharCode);
+ BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+ } else {
+ __ lhu(a1, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ uc16 match_char = str[i];
+ int match_high_byte = (match_char >> 8);
+ if (match_high_byte == 0) {
+ BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+ } else {
+ if (match_high_byte != stored_high_byte) {
+ __ li(a2, Operand(match_high_byte));
+ stored_high_byte = match_high_byte;
+ }
+ __ Addu(a3, a2, Operand(match_char & 0xff));
+ BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
+ }
+ }
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Addu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(kPointerSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ lw(a0, register_location(start_reg)); // Index of start of capture.
+ __ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Subu(a1, a1, a0); // Length of capture.
+
+ // If length is zero, either the capture is empty or it is not participating.
+ // In either case succeed immediately.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Addu(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ lbu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lbu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, t0, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(t0, t0, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, t0, Operand(a3));
+ __ Subu(a3, a3, Operand('a'));
+ __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Subu(current_input_offset(), a2, end_of_input_address());
+ } else {
+ ASSERT(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address()));
+
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ lw(a0, register_location(start_reg));
+ __ lw(a1, register_location(start_reg + 1));
+ __ Subu(a1, a1, a0); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+ // Compute pointers to match string and capture string.
+ __ Addu(a0, a0, Operand(end_of_input_address()));
+ __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ __ Addu(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ lbu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lbu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(a3, MemOperand(a0, 0));
+ __ addiu(a0, a0, char_size());
+ __ lhu(t0, MemOperand(a2, 0));
+ __ addiu(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Subu(current_input_offset(), a2, end_of_input_address());
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
+ int reg2,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ BranchOrBacktrack(on_equal, eq, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
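+  // For example, the 's' class below checks '\t' <= c <= '\r' by computing
+  // a0 = c - '\t' and taking an unsigned (hi/ls) comparison against
+  // '\r' - '\t'; any c below '\t' wraps to a large unsigned value, so a
+  // single branch covers both bounds.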
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0d.
+ __ Subu(a0, current_character(), Operand('\t'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // Match non-space characters.
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
+ __ Subu(a0, current_character(), Operand('\t'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
+ return true;
+ }
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9').
+ __ Subu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non ASCII-digits.
+ __ Subu(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Subu(a0, a0, Operand(0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+ __ Subu(a0, a0, Operand(0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Addu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ li(a0, Operand(map));
+ __ Addu(a0, a0, current_character());
+ __ lbu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+ __ li(v0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+ if (masm_->has_exception()) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into exit sequence right away.
+ __ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+    // Finalize code - write the entry point code now that we know how many
+    // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+ s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+ __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "at start" constant (value irrelevant).
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ __ Subu(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ lw(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Subu(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Subu(a0, current_input_offset(), Operand(char_size()));
+ __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
+ __ Subu(a0, a0, t5);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Determine whether the start index is zero, that is at the start of the
+ // string, and store that value in a local variable.
+ __ mov(t5, a1);
+ __ li(a1, Operand(1));
+ __ movn(a1, zero_reg, t5);
+ __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+
+ // Address of register 0.
+ __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ sw(a0, MemOperand(a1));
+ __ Addu(a1, a1, Operand(-kPointerSize));
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ }
+
+ // Initialize backtrack stack pointer.
+ __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+    // Initialize the code pointer register.
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+ __ Branch(&at_start, ne, a0, Operand(zero_reg));
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_label_);
+
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+ __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Subu(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srl(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Addu(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ ASSERT_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ lw(a2, register_location(i));
+ __ lw(a3, register_location(i + 1));
+ if (mode_ == UC16) {
+ __ sra(a2, a2, 1);
+ __ Addu(a2, a2, a1);
+ __ sra(a3, a3, 1);
+ __ Addu(a3, a3, a1);
+ } else {
+ __ Addu(a2, a1, Operand(a2));
+ __ Addu(a3, a1, Operand(a3));
+ }
+ __ sw(a2, MemOperand(a0));
+ __ Addu(a0, a0, kPointerSize);
+ __ sw(a3, MemOperand(a0));
+ __ Addu(a0, a0, kPointerSize);
+ }
+ }
+ __ li(v0, Operand(SUCCESS));
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+  // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers s0..s7 and return (restoring ra to pc).
+ __ MultiPop(registers_to_retain | ra.bit());
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() | backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers = current_input_offset().bit() |
+ current_character().bit();
+ __ MultiPush(regexp_registers);
+ Label grow_failed;
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mov(a0, backtrack_stackpointer());
+ __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+    // If it returns NULL, we have failed to grow the stack, and
+    // must exit with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), v0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+ __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(v0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = FACTORY->NewCode(code_desc,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+ return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerMIPS::Implementation() {
+ return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+ Pop(a0);
+ __ sw(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ __ emit(0);
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int16(cp_offset)) {
+ __ lw(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Addu(a0, code_pointer(), cp_offset);
+ __ lw(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ lw(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+ __ lw(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ __ lw(backtrack_stackpointer(), register_location(reg));
+ __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position,
+ ge,
+ current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ sw(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::Succeed() {
+ __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ sw(current_input_offset(), register_location(reg));
+ } else {
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ sw(a0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Subu(a0, backtrack_stackpointer(), a1);
+ __ sw(a0, register_location(reg));
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, scratch);
+ __ mov(a2, frame_pointer());
+ // Code* of self.
+ __ li(a1, Operand(masm_->CodeObject()));
+ // a0 becomes return address pointer.
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+ ASSERT(isolate == Isolate::Current());
+ if (isolate->stack_guard()->IsStackOverflow()) {
+ isolate->StackOverflow();
+ return EXCEPTION;
+ }
+
+  // If it is not a real stack overflow, the stack guard was used to interrupt
+  // execution for another purpose.
+
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
+ // Prepare for possible GC.
+ HandleScope handles;
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+ // Current string.
+ bool is_ascii = subject->IsAsciiRepresentation();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ MaybeObject* result = Execution::HandleStackGuardInterrupt();
+
+ if (*code_handle != re_code) { // Return address no longer valid.
+ int delta = *code_handle - re_code;
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ // String might have changed.
+ if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject).IsSequential() ||
+ StringShape(*subject).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ }
+
+ return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ BranchOrBacktrack(on_outside_input,
+ ge,
+ current_input_offset(),
+ Operand(-cp_offset * char_size()));
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt) {
+ __ BranchAndLink(to, cond, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+ __ pop(ra);
+ __ Addu(t5, ra, Operand(masm_->CodeObject()));
+ __ Jump(t5);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Subu(ra, ra, Operand(masm_->CodeObject()));
+ __ push(ra);
+}
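+
+
+// Note on the Safe* helpers: the return address is pushed as an offset from
+// the code object (SafeCallTarget subtracts masm_->CodeObject() and SafeReturn
+// adds it back), so the stored value stays valid if a GC triggered from the
+// called helper moves the generated code; compare CheckStackGuardState(),
+// which must patch absolute return addresses for the same reason.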
+
+
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ Addu(backtrack_stackpointer(),
+ backtrack_stackpointer(),
+ Operand(-kPointerSize));
+ __ sw(source, MemOperand(backtrack_stackpointer()));
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ lw(target, MemOperand(backtrack_stackpointer()));
+ __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ lw(a0, MemOperand(a0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
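
Both checks above load a limit from an external cell and take the slow path with the 'ls' (unsigned lower-or-same) condition, i.e. they fire once the relevant downward-growing stack pointer has reached its limit. A hedged sketch of just the comparison logic (handler messages are made up for the example):

#include <cstdint>
#include <cstdio>

static void CheckLimits(uintptr_t sp, uintptr_t cpu_stack_limit,
                        uintptr_t backtrack_sp, uintptr_t regexp_stack_limit) {
  // CheckPreemption: sp against the isolate's stack limit.
  if (sp <= cpu_stack_limit)
    std::puts("preemption requested -> take check_preempt_label_ path");
  // CheckStackLimit: backtrack stack pointer against the regexp stack limit.
  if (backtrack_sp <= regexp_stack_limit)
    std::puts("backtrack stack exhausted -> take stack_overflow_label_ path");
}

int main() {
  CheckLimits(/*sp=*/0x7000, /*cpu_stack_limit=*/0x6000,
              /*backtrack_sp=*/0x3ff0, /*regexp_stack_limit=*/0x4000);
  return 0;
}
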
+
+
+void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
+ ExternalReference function,
+ int num_arguments) {
+ // Must pass all arguments in registers. The stub pushes on the stack.
+ ASSERT(num_arguments <= 4);
+ __ li(code_pointer(), Operand(function));
+ RegExpCEntryStub stub;
+ __ CallStub(&stub);
+ if (OS::ActivationFrameAlignment() != 0) {
+ __ lw(sp, MemOperand(sp, 16));
+ }
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = a0;
+ }
+ // We assume that we cannot do unaligned loads on MIPS, so this function
+ // must only be used to load a single character at a time.
+ ASSERT(characters == 1);
+ __ Addu(t5, end_of_input_address(), Operand(offset));
+ if (mode_ == ASCII) {
+ __ lbu(current_character(), MemOperand(t5, 0));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ lhu(current_character(), MemOperand(t5, 0));
+ }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ int stack_alignment = OS::ActivationFrameAlignment();
+ if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+ // Stack is already aligned for call, so decrement by alignment
+ // to make room for storing the return address.
+ __ Subu(sp, sp, Operand(stack_alignment));
+ __ sw(ra, MemOperand(sp, 0));
+ __ mov(a0, sp);
+ __ mov(t9, t1);
+ __ Call(t9);
+ __ lw(ra, MemOperand(sp, 0));
+ __ Addu(sp, sp, Operand(stack_alignment));
+ __ Jump(Operand(ra));
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h
new file mode 100644
index 000000000..ad7ada547
--- /dev/null
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS();
+ virtual ~RegExpMacroAssemblerMIPS();
+};
+#else // V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+ virtual ~RegExpMacroAssemblerMIPS();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual void Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kRegisterOutput = kStackFrameHeader + 20;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kInputEnd = kFramePointer - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kAtStart - kPointerSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+  // The fp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return t2; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return t3; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t6; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t4; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return t1; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to,
+ Condition cond,
+ Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+  // Calls a C function and cleans up the frame alignment done by
+  // FrameAlign. The called function *is* allowed to trigger a garbage
+ // collection, but may not take more than four arguments (no arguments
+ // passed on the stack), and the first argument will be a pointer to the
+ // return address.
+ inline void CallCFunctionUsingStub(ExternalReference function,
+ int num_arguments);
+
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
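
For orientation, the frame-offset constants declared in the header above work out to the following byte offsets on a 32-bit target (kPointerSize == 4); the snippet only reproduces the arithmetic from the declarations, nothing else is assumed:

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kFramePointer = 0;
  const int kStoredRegisters = kFramePointer;                      // +0
  const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;  // +36
  const int kStackFrameHeader = kReturnAddress + kPointerSize;     // +40
  const int kInputEnd = kFramePointer - kPointerSize;              // -4
  const int kInputStart = kInputEnd - kPointerSize;                // -8
  const int kStartIndex = kInputStart - kPointerSize;              // -12
  const int kInputString = kStartIndex - kPointerSize;             // -16
  const int kInputStartMinusOne = kInputString - kPointerSize;     // -20
  const int kAtStart = kInputStartMinusOne - kPointerSize;         // -24
  const int kRegisterZero = kAtStart - kPointerSize;               // -28
  std::printf("above fp: return address %+d, frame header %+d\n",
              kReturnAddress, kStackFrameHeader);
  std::printf("below fp: input end %+d ... at-start %+d, register 0 %+d\n",
              kInputEnd, kAtStart, kRegisterZero);
  return 0;
}
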
diff --git a/deps/v8/src/mips/register-allocator-mips-inl.h b/deps/v8/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index a876bee49..000000000
--- a/deps/v8/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
-    14,  // t6
- 15, // t7
- 16, // t8
- 17, // t9
- 18, // s0
- 19, // s1
- 20, // s2
- 21, // s3
- 22, // s4
- 23, // s5
- 24, // s6
- 25, // s7
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // s8_fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0,
- v1,
- a0,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- t4,
- t5,
- t6,
- t7,
- s0,
- s1,
- s2,
- s3,
- s4,
- s5,
- s6,
- s7,
- t8,
- t9,
- k0,
- k1,
- gp,
- sp,
- s8_fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The non-reserved a1 and ra registers are live on JS function entry.
- Use(a1); // JS function.
- Use(ra); // Return address.
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/deps/v8/src/mips/register-allocator-mips.cc b/deps/v8/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61bee..000000000
--- a/deps/v8/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on MIPS.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 59a537324..30e12e75b 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <math.h>
+#include <limits.h>
#include <cstdarg>
#include "v8.h"
@@ -33,27 +35,29 @@
#include "disasm.h"
#include "assembler.h"
-#include "globals.h" // Need the BitCast
+#include "globals.h" // Need the BitCast.
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
-namespace v8i = v8::internal;
-
-#if !defined(__mips) || defined(USE_SIMULATOR)
// Only build the simulator if not compiling for real MIPS hardware.
-namespace assembler {
-namespace mips {
+#if defined(USE_SIMULATOR)
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
-// Utils functions
+// Utils functions.
bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) > 0);
+ return ((a ^ b) >= 0);
+}
+
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
}
@@ -63,15 +67,18 @@ bool HaveSameSign(int32_t a, int32_t b) {
// Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-// The Debugger class is used by the simulator while debugging simulated MIPS
+// The MipsDebugger class is used by the simulator while debugging simulated
// code.
-class Debugger {
+class MipsDebugger {
public:
- explicit Debugger(Simulator* sim);
- ~Debugger();
+ explicit MipsDebugger(Simulator* sim);
+ ~MipsDebugger();
void Stop(Instruction* instr);
void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
private:
// We set the breakpoint code to 0xfffff to easily recognize it.
@@ -81,6 +88,10 @@ class Debugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
+ int32_t GetFPURegisterValueInt(int regnum);
+ int64_t GetFPURegisterValueLong(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
bool GetValue(const char* desc, int32_t* value);
// Set or delete a breakpoint. Returns true if successful.
@@ -91,18 +102,17 @@ class Debugger {
// execution to skip past breakpoints when run from the debugger.
void UndoBreakpoints();
void RedoBreakpoints();
-
- // Print all registers with a nice formatting.
- void PrintAllRegs();
};
-Debugger::Debugger(Simulator* sim) {
+MipsDebugger::MipsDebugger(Simulator* sim) {
sim_ = sim;
}
-Debugger::~Debugger() {
+
+MipsDebugger::~MipsDebugger() {
}
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
@@ -115,36 +125,58 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instruction* instr) {
- UNIMPLEMENTED_MIPS();
- char* str = reinterpret_cast<char*>(instr->InstructionBits());
- if (strlen(str) > 0) {
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+      reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *msg_address;
+ ASSERT(msg != NULL);
+
+ // Update this stop description.
+  if (!sim_->watched_stops[code].desc) {
+    sim_->watched_stops[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
if (coverage_log != NULL) {
-      fprintf(coverage_log, "%s\n", str);
+      fprintf(coverage_log, "%s\n", msg);
fflush(coverage_log);
}
- instr->SetInstructionBits(0x0); // Overwrite with nop.
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
-#else // ndef GENERATED_CODE_COVERAGE
+
+#else // GENERATED_CODE_COVERAGE
#define UNSUPPORTED() printf("Unsupported instruction.\n");
static void InitializeCoverage() {}
-void Debugger::Stop(Instruction* instr) {
- const char* str = reinterpret_cast<char*>(instr->InstructionBits());
- PrintF("Simulator hit %s\n", str);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+void MipsDebugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ Instruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watched_stops[code].desc) {
+ sim_->watched_stops[code].desc = msg;
+ }
+ PrintF("Simulator hit %s (%u)\n", msg, code);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif // GENERATED_CODE_COVERAGE
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t MipsDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
return sim_->get_pc();
} else {
@@ -153,11 +185,54 @@ int32_t Debugger::GetRegisterValue(int regnum) {
}
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_long(regnum);
+ }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
if (regnum != kInvalidRegister) {
*value = GetRegisterValue(regnum);
return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValueInt(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
} else {
return SScanF(desc, "%i", value) == 1;
}
@@ -165,7 +240,7 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
}
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -180,7 +255,7 @@ bool Debugger::SetBreakpoint(Instruction* breakpc) {
}
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -191,32 +266,33 @@ bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
}
-void Debugger::UndoBreakpoints() {
+void MipsDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::RedoBreakpoints() {
+void MipsDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-void Debugger::PrintAllRegs() {
+
+void MipsDebugger::PrintAllRegs() {
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
PrintF("\n");
- // at, v0, a0
+ // at, v0, a0.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(1), REG_INFO(2), REG_INFO(4));
- // v1, a1
+ // v1, a1.
PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
"", REG_INFO(3), REG_INFO(5));
- // a2
+ // a2.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
- // a3
+ // a3.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
PrintF("\n");
// t0-t7, s0-s7
@@ -225,22 +301,57 @@ void Debugger::PrintAllRegs() {
REG_INFO(8+i), REG_INFO(16+i));
}
PrintF("\n");
- // t8, k0, LO
+ // t8, k0, LO.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(24), REG_INFO(26), REG_INFO(32));
- // t9, k1, HI
+ // t9, k1, HI.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(25), REG_INFO(27), REG_INFO(33));
- // sp, fp, gp
+ // sp, fp, gp.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(29), REG_INFO(30), REG_INFO(28));
- // pc
+ // pc.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(31), REG_INFO(34));
+
#undef REG_INFO
+#undef FPU_REG_INFO
}
-void Debugger::Debug() {
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+ GetFPURegisterValueInt(n+1), \
+ GetFPURegisterValueInt(n), \
+ GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -253,8 +364,9 @@ void Debugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
- // make sure to have a proper terminating character if reaching the limit
+ // Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
@@ -267,10 +379,10 @@ void Debugger::Debug() {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(sim_->get_pc()));
+ reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
last_pc = sim_->get_pc();
}
@@ -280,19 +392,21 @@ void Debugger::Debug() {
} else {
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
- int args = SScanF(line,
+ int argc = SScanF(line,
"%" XSTR(COMMAND_SIZE) "s "
"%" XSTR(ARG_SIZE) "s "
"%" XSTR(ARG_SIZE) "s",
cmd, arg1, arg2);
if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
sim_->InstructionDecode(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -300,23 +414,65 @@ void Debugger::Debug() {
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
+ float fvalue;
if (strcmp(arg1, "all") == 0) {
PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
} else {
- if (GetValue(arg1, &value)) {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
PrintF("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ if (fpuregnum % 2 == 1) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ double dfvalue;
+ int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
+ int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
+ dfvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+ FPURegisters::Name(fpuregnum+1),
+ FPURegisters::Name(fpuregnum),
+ lvalue1,
+ lvalue2,
+ dfvalue);
+ }
} else {
PrintF("%s unrecognized\n", arg1);
}
}
} else {
- PrintF("print <register>\n");
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int32_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
}
} else if ((strcmp(cmd, "po") == 0)
|| (strcmp(cmd, "printobject") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
@@ -333,45 +489,106 @@ void Debugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
- } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = NULL;
+ int32_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%08x: 0x%08x %10d",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+ int value = *cur;
+ Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+ PrintF(" (");
+ if ((value & 1) == 0) {
+ PrintF("smi %d", value / 2);
+ } else {
+ obj->ShortPrint();
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
-
- if (args == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
- // no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * Instruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * Instruction::kInstrSize);
+ }
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
@@ -389,44 +606,104 @@ void Debugger::Debug() {
}
} else if (strcmp(cmd, "flags") == 0) {
PrintF("No flags on MIPS !\n");
- } else if (strcmp(cmd, "unstop") == 0) {
- PrintF("Unstop command not implemented on MIPS.");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc +
+ Instruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
} else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
- // Print registers and disassemble
+ // Print registers and disassemble.
PrintAllRegs();
PrintF("\n");
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
+ byte* cur = NULL;
+ byte* end = NULL;
- if (args == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
+ cur = reinterpret_cast<byte*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ end = cur + (10 * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@@ -438,20 +715,43 @@ void Debugger::Debug() {
PrintF(" use register name 'all' to print all registers\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+      PrintF("  dump stack content, default 10 words\n");
+      PrintF("mem <address> [<words>]\n");
+      PrintF("  dump memory content, default 10 words\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("disasm [<instructions>]\n");
- PrintF("disasm [[<address>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions from pc\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
PrintF("gdb\n");
PrintF(" enter gdb\n");
PrintF("break <address>\n");
PrintF(" set a break point on the address\n");
PrintF("del\n");
PrintF(" delete the breakpoint\n");
- PrintF("unstop\n");
- PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+      PrintF("    stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+      PrintF("    - The Simulator keeps track of how many times they\n");
+      PrintF("      are met. (See the info command.) Going over a\n");
+      PrintF("      disabled stop still increases its counter.\n");
+ PrintF(" Commands:\n");
+      PrintF("    stop info all/<code> : print info about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@@ -471,29 +771,120 @@ void Debugger::Debug() {
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
+static bool ICacheMatch(void* one, void* two) {
+ ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
-bool Simulator::initialized_ = false;
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ ASSERT_EQ(0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
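
FlushICache first widens the requested range to whole cache lines, then peels off page-sized chunks until the remainder fits on a single page. The same rounding arithmetic, with assumed line and page sizes rather than the real CachePage constants, looks like this:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kLineSize = 32, kLineMask = kLineSize - 1;    // Assumed sizes.
  const intptr_t kPageSize = 4096, kPageMask = kPageSize - 1;

  intptr_t start = 0x12345f10;
  intptr_t size = 100;

  intptr_t intra_line = start & kLineMask;
  start -= intra_line;                      // Align start down to a line.
  size += intra_line;
  size = ((size - 1) | kLineMask) + 1;      // Round size up to whole lines.

  // Peel off chunks while the range still crosses a page boundary.
  while ((start & ~kPageMask) != ((start + size - 1) & ~kPageMask)) {
    intptr_t bytes_to_flush = kPageSize - (start & kPageMask);
    std::printf("flush page chunk at 0x%lx, %ld bytes\n",
                (long)start, (long)bytes_to_flush);
    start += bytes_to_flush;
    size -= bytes_to_flush;
  }
  if (size != 0)
    std::printf("flush final chunk at 0x%lx, %ld bytes\n",
                (long)start, (long)size);
  return 0;
}
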
-void Simulator::Initialize() {
- if (initialized_) return;
- simulator_key = v8::internal::Thread::CreateThreadLocalKey();
- initialized_ = true;
- ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
+ ASSERT(size <= CachePage::kPageSize);
+ ASSERT(AllOnOnePage(start, size - 1));
+ ASSERT((start & CachePage::kLineMask) == 0);
+ ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
}
-Simulator::Simulator() {
- Initialize();
+void Simulator::Initialize(Isolate* isolate) {
+ if (isolate->simulator_initialized()) return;
+ isolate->set_simulator_initialized(true);
+ ::v8::internal::ExternalReference::set_redirector(isolate,
+ &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
+ Initialize(isolate);
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
- size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
- stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ stack_ = reinterpret_cast<char*>(malloc(stack_size_));
pc_modified_ = false;
icount_ = 0;
+ break_count_ = 0;
break_pc_ = NULL;
break_instr_ = 0;
@@ -502,16 +893,23 @@ Simulator::Simulator() {
for (int i = 0; i < kNumSimuRegisters; i++) {
registers_[i] = 0;
}
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
// The ra and pc are initialized to a known bad value that will cause an
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
InitializeCoverage();
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
}
@@ -524,12 +922,18 @@ Simulator::Simulator() {
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, bool fp_return)
+ Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
- fp_return_(fp_return),
- next_(list_) {
- list_ = this;
+ type_(type),
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
@@ -537,14 +941,16 @@ class Redirection {
}
void* external_function() { return external_function_; }
- bool fp_return() { return fp_return_; }
+ ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function, bool fp_return) {
- Redirection* current;
- for (current = list_; current != NULL; current = current->next_) {
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, fp_return);
+ return new Redirection(external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -557,31 +963,30 @@ class Redirection {
private:
void* external_function_;
uint32_t swi_instruction_;
- bool fp_return_;
+ ExternalReference::Type type_;
Redirection* next_;
- static Redirection* list_;
};
-Redirection* Redirection::list_ = NULL;
-
-
void* Simulator::RedirectExternalReference(void* external_function,
- bool fp_return) {
- Redirection* redirection = Redirection::Get(external_function, fp_return);
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
// Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
- Initialize();
- Simulator* sim = reinterpret_cast<Simulator*>(
- v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+  ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread goes away.
- sim = new Simulator();
- v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
}
return sim;
}
@@ -595,18 +1000,26 @@ void Simulator::set_register(int reg, int32_t value) {
pc_modified_ = true;
}
- // zero register always hold 0.
+ // Zero register always holds 0.
registers_[reg] = (reg == 0) ? 0 : value;
}
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ *BitCast<double*>(&FPUregisters_[fpureg]) = value;
}
@@ -620,22 +1033,171 @@ int32_t Simulator::get_register(int reg) const {
return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
+
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
}
+
+int64_t Simulator::get_fpu_register_long(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ return *BitCast<int64_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<float*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
+
+// For use in calls that take two double values, constructed either
+// from a0-a3 or f12 and f14.
+void Simulator::GetFpArgs(double* x, double* y) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ *y = get_fpu_register_double(14);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+ // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+
+ // Registers a2 and a3 -> y.
+ reg_buffer[0] = get_register(a2);
+ reg_buffer[1] = get_register(a3);
+ memcpy(y, buffer, sizeof(buffer));
+ }
+}
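
On the soft-float path above, each double argument arrives split across two general-purpose registers, and the char buffer plus memcpy reassembles it without running afoul of strict aliasing. A self-contained sketch of that round trip (register values are simulated here; little-endian word order assumed, as on MIPS EL):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double x = 3.141592653589793;

  // "Caller side": split the double into two 32-bit words (a0/a1).
  int32_t regs[2];
  std::memcpy(regs, &x, sizeof(x));
  int32_t a0 = regs[0], a1 = regs[1];

  // "Callee side" (as in GetFpArgs): rebuild the double from a0/a1.
  char buffer[sizeof(double)];
  int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
  reg_buffer[0] = a0;
  reg_buffer[1] = a1;
  double rebuilt;
  std::memcpy(&rebuilt, buffer, sizeof(buffer));

  std::printf("%.15f\n", rebuilt);  // Prints the original value.
  return 0;
}
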
+
+
+// For use in calls that take one double value, constructed either
+// from a0 and a1 or f12.
+void Simulator::GetFpArgs(double* x) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+ // Registers a0 and a1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+ }
+}
+
+
+// For use in calls that take one double value constructed either
+// from a0 and a1 or f12 and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+ if (!IsMipsSoftFloatABI) {
+ *x = get_fpu_register_double(12);
+ *y = get_register(a2);
+ } else {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[sizeof(*x)];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+ // Registers 0 and 1 -> x.
+ reg_buffer[0] = get_register(a0);
+ reg_buffer[1] = get_register(a1);
+ memcpy(x, buffer, sizeof(buffer));
+
+ // Register 2 -> y.
+ reg_buffer[0] = get_register(a2);
+ memcpy(y, buffer, sizeof(*y));
+ }
+}
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(0, result);
+ } else {
+ char buffer[2 * sizeof(registers_[0])];
+ int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+ memcpy(buffer, &result, sizeof(buffer));
+ // Copy result to v0 and v1.
+ set_register(v0, reg_buffer[0]);
+ set_register(v1, reg_buffer[1]);
+ }
+}
+
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+ return FCSR_ & (1 << cc);
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+
+ if (!isfinite(original) || !isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
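
set_fcsr_round_error classifies the outcome of rounding into the FCSR flag bits: invalid for non-finite values, inexact when rounding changed the value, underflow for nonzero results in the denormal range, and overflow (plus invalid) when the result does not fit in an int32. A plain C++ sketch that mirrors those checks on a few sample values (flag names here are just strings, not V8 constants):

#include <cfloat>
#include <climits>
#include <cmath>
#include <cstdio>

static void Classify(double original, double rounded) {
  std::printf("%g -> %g:", original, rounded);
  if (!std::isfinite(original) || !std::isfinite(rounded))
    std::printf(" invalid");
  if (original != rounded) std::printf(" inexact");
  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0)
    std::printf(" underflow");
  if (rounded > INT_MAX || rounded < INT_MIN)
    std::printf(" overflow invalid");
  std::printf("\n");
}

int main() {
  Classify(1.5, 1.0);    // inexact
  Classify(3e10, 3e10);  // overflow (does not fit in an int32), invalid
  Classify(NAN, NAN);    // invalid (and inexact, since NaN != NaN)
  return 0;
}
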
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
registers_[pc] = value;
}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
// Raw access to the PC register without the special adjustment when reading.
int32_t Simulator::get_pc() const {
return registers_[pc];
@@ -651,24 +1213,40 @@ int32_t Simulator::get_pc() const {
// get the correct MIPS-like behaviour on unaligned accesses.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+  if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
return 0;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
}
@@ -677,7 +1255,9 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -689,7 +1269,9 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -699,7 +1281,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -710,7 +1294,9 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
@@ -722,7 +1308,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -733,7 +1321,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
@@ -746,7 +1336,7 @@ uint32_t Simulator::ReadBU(int32_t addr) {
int32_t Simulator::ReadB(int32_t addr) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return ((*ptr << 24) >> 24) & 0xff;
+ return *ptr;
}
@@ -773,7 +1363,7 @@ uintptr_t Simulator::StackLimit() const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- instr, format);
+ reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED_MIPS();
}
@@ -782,19 +1372,36 @@ void Simulator::Format(Instruction* instr, const char* format) {
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
+// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
- int32_t arg3);
-typedef double (*SimulatorRuntimeFPCall)(double fparg0,
- double fparg1);
-
+ int32_t arg3,
+ int32_t arg4,
+ int32_t arg5);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
+
+// This signature supports direct calls into the API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct call to accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+ int32_t arg1);
// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
+// C-based V8 runtime. They are also used for debugging with the simulator.
void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // There are several instructions that could get us here: the break_
+  // instruction, or several variants of traps. All are of the "SPECIAL"
+  // opcode class and are distinguished by the function field.
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+
// We first check if we met a call_rt_redirected.
if (instr->InstructionBits() == rtCallRedirInstr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr);
@@ -802,55 +1409,257 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
int32_t arg3 = get_register(a3);
- // fp args are (not always) in f12 and f14.
- // See MIPS conventions for more details.
- double fparg0 = get_fpu_register_double(f12);
- double fparg1 = get_fpu_register_double(f14);
+ int32_t arg4 = 0;
+ int32_t arg5 = 0;
+
+ // Need to check if sp is valid before assigning arg4, arg5.
+ // This is a fix for cctest test-api/CatchStackOverflow which causes
+    // the stack to overflow. For some reason the ARM port doesn't need this
+ // stack check here.
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+ int32_t* stack = reinterpret_cast<int32_t*>(stack_);
+ if (stack_pointer >= stack && stack_pointer < stack + stack_size_ - 5) {
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ arg4 = stack_pointer[4];
+ arg5 = stack_pointer[5];
+ }
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ if (!IsMipsSoftFloatABI) {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_fpu_register(f14);
+ arg3 = get_fpu_register(f15);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_ra = get_register(ra);
- if (redirection->fp_return()) {
- intptr_t external =
+
+ intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
+
+    // Based on CpuFeatures::IsSupported(FPU), MIPS uses either the hardware
+    // FPU or gcc soft-float routines. The hardware FPU is simulated by this
+    // simulator. Soft-float has an additional abstraction, ExternalReference,
+    // to support serialization.
+ if (fp_call) {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ double dval0, dval1;
+ int32_t ival;
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ GetFpArgs(&dval0, &dval1);
+ PrintF("Call to host function at %p with args %f, %f",
+ FUNCTION_ADDR(target), dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ GetFpArgs(&dval0);
+ PrintF("Call to host function at %p with arg %f",
+ FUNCTION_ADDR(target), dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ GetFpArgs(&dval0, &ival);
+ PrintF("Call to host function at %p with args %f, %d",
+ FUNCTION_ADDR(target), dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ double result = target(arg0, arg1, arg2, arg3);
+ if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ SetFpResult(result);
+ } else {
+ int32_t gpreg_pair[2];
+ memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+ set_register(v0, gpreg_pair[0]);
+ set_register(v1, gpreg_pair[1]);
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ // See DirectCEntryStub::GenerateCall for explanation of register usage.
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p with args %f, %f\n",
- FUNCTION_ADDR(target), fparg0, fparg1);
+ PrintF("Call to host function at %p args %08x\n",
+ FUNCTION_ADDR(target), arg1);
}
- double result = target(fparg0, fparg1);
- set_fpu_register_double(f0, result);
+ v8::Handle<v8::Value> result = target(arg1);
+ *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ set_register(v0, arg0);
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ // See DirectCEntryStub::GenerateCall for explanation of register usage.
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x %08x\n",
+ FUNCTION_ADDR(target), arg1, arg2);
+ }
+ v8::Handle<v8::Value> result = target(arg1, arg2);
+ *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ set_register(v0, arg0);
} else {
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF(
- "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+ "Call to host function at %p "
+ "args %08x, %08x, %08x, %08x, %08x, %08x\n",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
- arg3);
+ arg3,
+ arg4,
+ arg5);
}
- int64_t result = target(arg0, arg1, arg2, arg3);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(v0, lo_res);
- set_register(v1, hi_res);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ set_register(v0, static_cast<int32_t>(result));
+ set_register(v1, static_cast<int32_t>(result >> 32));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
}
set_register(ra, saved_ra);
set_pc(get_register(ra));
+
+ } else if (func == BREAK && code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr);
+ }
} else {
- Debugger dbg(this);
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
dbg.Debug();
}
}
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint32_t code, Instruction* instr) {
+  // Stop if it is enabled; otherwise jump over the stop
+  // and the message address.
+ if (IsEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
+ }
+}
+
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+ return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+
+bool Simulator::IsEnabledStop(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+ ASSERT(code > kMaxWatchpointCode);
+ return !(watched_stops[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops[code].count &= ~kStopDisabledBit;
+ }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops[code].count |= kStopDisabledBit;
+ }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+ if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
+ PrintF("Stop counter for code %i has overflowed.\n"
+           "Enabling this code and resetting the counter to 0.\n", code);
+ watched_stops[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops[code].count++;
+ }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops[code].desc) {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops[code].desc);
+ } else {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
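For reference, the classification applied by SoftwareInterrupt and the helpers above fits in one function; kMaxWatchpointCode and kMaxStopCode are the limits declared elsewhere in the MIPS port, and the enum is purely illustrative:

enum BreakCodeKind { kWatchpointCode, kStopCode, kPlainBreak };

// 'code' is taken from bits 25..6 of the break_ instruction.
static BreakCodeKind ClassifyBreakCode(uint32_t code) {
  if (code <= kMaxWatchpointCode) return kWatchpointCode;  // Print registers, keep running.
  if (code <= kMaxStopCode) return kStopCode;              // Counted; may enter the debugger.
  return kPlainBreak;                                      // Handled directly by MipsDebugger.
}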
+
+
void Simulator::SignalExceptions() {
for (int i = 1; i < kNumExceptions; i++) {
if (exceptions[i] != 0) {
@@ -859,51 +1668,52 @@ void Simulator::SignalExceptions() {
}
}
-// Handle execution based on instruction types.
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs_reg = instr->RsField();
- int32_t rs = get_register(rs_reg);
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField();
- int32_t rt = get_register(rt_reg);
- uint32_t rt_u = static_cast<uint32_t>(rt);
- int32_t rd_reg = instr->RdField();
- uint32_t sa = instr->SaField();
- int32_t fs_reg= instr->FsField();
-
- // ALU output
- // It should not be used as is. Instructions using it should always initialize
- // it first.
- int32_t alu_out = 0x12345678;
- // Output or temporary for floating point.
- double fp_out = 0.0;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
+// Handle execution based on instruction types.
- // ---------- Configuration
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt) {
+ // Every local variable declared here needs to be const.
+ // This is to make sure that changed values are sent back to
+ // DecodeTypeRegister correctly.
+
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+ const uint32_t sa = instr->SaValue();
+
+ const int32_t fs_reg = instr->FsValue();
+
+
+ // ---------- Configuration.
switch (op) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Handled in DecodeTypeImmed, should never come here.
UNREACHABLE();
break;
+ case CFC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
case MFC1:
alu_out = get_fpu_register(fs_reg);
break;
case MFHC1:
- fp_out = get_fpu_register_double(fs_reg);
- alu_out = *v8i::BitCast<int32_t*>(&fp_out);
+ UNIMPLEMENTED_MIPS();
break;
+ case CTC1:
case MTC1:
case MTHC1:
// Do the store in the execution step.
@@ -923,13 +1733,22 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR:
case JALR:
- next_pc = get_register(instr->RsField());
+ next_pc = get_register(instr->RsValue());
break;
case SLL:
alu_out = rt << sa;
break;
case SRL:
- alu_out = rt_u >> sa;
+ if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits. The RS field is always equal to 0.
+ alu_out = rt_u >> sa;
+ } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. The RS field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
break;
case SRA:
alu_out = rt >> sa;
@@ -938,7 +1757,16 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
alu_out = rt << rs;
break;
case SRLV:
- alu_out = rt_u >> rs;
+ if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number of
+            // bits. The SA field is always equal to 0.
+ alu_out = rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in MIPS32
+            // Release 2. The SA field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
break;
case SRAV:
alu_out = rt >> rs;
@@ -950,14 +1778,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
alu_out = get_register(LO);
break;
case MULT:
- UNIMPLEMENTED_MIPS();
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
break;
case MULTU:
- UNIMPLEMENTED_MIPS();
- break;
- case DIV:
- case DIVU:
- exceptions[kDivideByZero] = rt == 0;
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
case ADD:
if (HaveSameSign(rs, rt)) {
@@ -1003,8 +1827,9 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case SLTU:
alu_out = rs_u < rt_u ? 1 : 0;
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
+
do_interrupt = true;
break;
case TGE:
@@ -1025,6 +1850,15 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case TNE:
do_interrupt = rs != rt;
break;
+ case MOVN:
+ case MOVZ:
+ case MOVCI:
+ // No action taken on decode.
+ break;
+ case DIV:
+ case DIVU:
+ // div and divu never raise exceptions.
+ break;
default:
UNREACHABLE();
};
@@ -1034,43 +1868,130 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case MUL:
alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
+ case CLZ:
+ alu_out = __builtin_clz(rs_u);
+ break;
default:
UNREACHABLE();
- }
+ };
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ };
break;
default:
UNREACHABLE();
};
+}
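The SRL/SRLV rotate cases and the SPECIAL3 EXT case above boil down to two small bit tricks; standalone sketches with illustrative helper names:

#include <cstdint>

// MIPS32R2 ROTR: rotate a 32-bit word right by sa bits (sa in 1..31 here).
static uint32_t RotateRight32(uint32_t value, uint32_t sa) {
  return (value >> sa) | (value << (32 - sa));  // Same expression as the alu_out above.
}
// RotateRight32(0x80000001, 1) == 0xC0000000.

// MIPS32R2 EXT: extract 'size' bits starting at 'lsb' (size is encoded as msb + 1 above).
static uint32_t ExtractBits(uint32_t rs, uint32_t lsb, uint32_t size) {
  uint32_t mask = (1u << size) - 1;  // Same mask construction as the EXT case.
  return (rs & (mask << lsb)) >> lsb;
}
// ExtractBits(0x12345678, 8, 8) == 0x56.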
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+
+ const int32_t fs_reg = instr->FsValue();
+ const int32_t ft_reg = instr->FtValue();
+ const int32_t fd_reg = instr->FdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+
+  // Set up the variables if needed before executing the instruction.
+ ConfigureTypeRegister(instr,
+ alu_out,
+ i64hilo,
+ u64hilo,
+ next_pc,
+ do_interrupt);
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Branch on coprocessor condition.
UNREACHABLE();
break;
+ case CFC1:
+ set_register(rt_reg, alu_out);
case MFC1:
- case MFHC1:
set_register(rt_reg, alu_out);
break;
+ case MFHC1:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CTC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
case MTC1:
- // We don't need to set the higher bits to 0, because MIPS ISA says
- // they are in an unpredictable state after executing MTC1.
FPUregisters_[fs_reg] = registers_[rt_reg];
- FPUregisters_[fs_reg+1] = Unpredictable;
break;
case MTHC1:
- // Here we need to keep the lower bits unchanged.
- FPUregisters_[fs_reg+1] = registers_[rt_reg];
+ UNIMPLEMENTED_MIPS();
break;
case S:
+ float f;
switch (instr->FunctionFieldRaw()) {
case CVT_D_S:
+ f = get_fpu_register_float(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(f));
+ break;
case CVT_W_S:
case CVT_L_S:
+ case TRUNC_W_S:
+ case TRUNC_L_S:
+ case ROUND_W_S:
+ case ROUND_L_S:
+ case FLOOR_W_S:
+ case FLOOR_L_S:
+ case CEIL_W_S:
+ case CEIL_L_S:
case CVT_PS_S:
UNIMPLEMENTED_MIPS();
break;
@@ -1079,10 +2000,138 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
break;
case D:
+ double ft, fs;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ cc = instr->FCccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case CVT_S_D:
- case CVT_W_D:
- case CVT_L_D:
+ case ADD_D:
+ set_fpu_register_double(fd_reg, fs + ft);
+ break;
+ case SUB_D:
+ set_fpu_register_double(fd_reg, fs - ft);
+ break;
+ case MUL_D:
+ set_fpu_register_double(fd_reg, fs * ft);
+ break;
+ case DIV_D:
+ set_fpu_register_double(fd_reg, fs / ft);
+ break;
+ case ABS_D:
+ set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+ break;
+ case MOV_D:
+ set_fpu_register_double(fd_reg, fs);
+ break;
+ case NEG_D:
+ set_fpu_register_double(fd_reg, -fs);
+ break;
+ case SQRT_D:
+ set_fpu_register_double(fd_reg, sqrt(fs));
+ break;
+ case C_UN_D:
+ set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
+ break;
+ case C_EQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft));
+ break;
+ case C_UEQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft));
+ break;
+ case C_ULT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft));
+ break;
+ case C_ULE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case CVT_W_D: // Convert double to word.
+ // Rounding modes are not yet supported.
+ ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ROUND_W_D: // Round double to word.
+ {
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case TRUNC_W_D: // Truncate double to word (round towards 0).
+ {
+ double rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case FLOOR_W_D: // Round double to word towards negative infinity.
+ {
+ double rounded = floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CEIL_W_D: // Round double to word towards positive infinity.
+ {
+ double rounded = ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CVT_S_D: // Convert double to float (single).
+ set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ break;
+ case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case TRUNC_L_D: { // Mips32r2 instruction.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case ROUND_L_D: { // Mips32r2 instruction.
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case FLOOR_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(floor(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case CEIL_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(ceil(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case C_F_D:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1091,11 +2140,13 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- set_fpu_register(rd_reg, static_cast<double>(rs));
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
default:
UNREACHABLE();
@@ -1103,8 +2154,14 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case L:
switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: // Mips32r2 instruction.
+          // Watch the signs here: we combine two 32-bit values
+          // into a signed 64-bit value.
+ i64 = (uint32_t) get_fpu_register(fs_reg);
+ i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ break;
case CVT_S_L:
- case CVT_D_L:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1121,7 +2178,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@@ -1129,27 +2186,38 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
case JALR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
}
// Instructions using HI and LO registers.
case MULT:
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
case MULTU:
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was checked in the configuration step.
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ // Divide by zero was not checked in the configuration step - div and
+ // divu do not raise exceptions. On division by 0, the result will
+ // be UNPREDICTABLE.
+ if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
break;
case DIVU:
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
case TGE:
case TGEU:
@@ -1161,6 +2229,23 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
SoftwareInterrupt(instr);
}
break;
+ // Conditional moves.
+ case MOVN:
+ if (rt) set_register(rd_reg, rs);
+ break;
+ case MOVCI: {
+ uint32_t cc = instr->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ if (instr->Bit(16)) { // Read Tf bit.
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ } else {
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ }
+ break;
+ }
+ case MOVZ:
+ if (!rt) set_register(rd_reg, rs);
+ break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
};
@@ -1173,9 +2258,23 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
+ default: // For other special2 opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS:
+ // Ins instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
+ case EXT:
+ // Ext instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
default:
UNREACHABLE();
- }
+ };
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -1185,22 +2284,22 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
};
}
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
- // Instruction fields
+ // Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
- int32_t rs = get_register(instr->RsField());
+ int32_t rs = get_register(instr->RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField(); // destination register
+ int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Field();
+ int16_t imm16 = instr->Imm16Value();
- int32_t ft_reg = instr->FtField(); // destination register
- int32_t ft = get_register(ft_reg);
+ int32_t ft_reg = instr->FtValue(); // Destination register.
- // zero extended immediate
+ // Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
- // sign extended immediate
+ // Sign extended immediate.
int32_t se_imm16 = imm16;
// Get current pc.
@@ -1208,31 +2307,44 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Next pc.
int32_t next_pc = bad_ra;
- // Used for conditional branch instructions
+ // Used for conditional branch instructions.
bool do_branch = false;
bool execute_branch_delay_instruction = false;
- // Used for arithmetic instructions
+ // Used for arithmetic instructions.
int32_t alu_out = 0;
- // Floating point
+ // Floating point.
double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
- // Used for memory instructions
+ // Used for memory instructions.
int32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
- // ---------- Configuration (and execution for REGIMM)
+ // ---------- Configuration (and execution for REGIMM).
switch (op) {
- // ------------- COP1. Coprocessor instructions
+ // ------------- COP1. Coprocessor instructions.
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
- UNIMPLEMENTED_MIPS();
+ case BC1: // Branch on coprocessor condition.
+ cc = instr->FBccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
+ cc_value = test_fcsr_bit(fcsr_cc);
+ do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
break;
default:
UNREACHABLE();
};
break;
- // ------------- REGIMM class
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
@@ -1257,9 +2369,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
set_register(31, current_pc + kBranchReturnOffset);
}
@@ -1269,8 +2381,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
default:
break;
};
- break; // case REGIMM
- // ------------- Branch instructions
+ break; // case REGIMM.
+ // ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
// need to replace rt with zero.
case BEQ:
@@ -1285,7 +2397,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGTZ:
do_branch = rs > 0;
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
if (HaveSameSign(rs, se_imm16)) {
if (rs > 0) {
@@ -1318,11 +2430,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case LUI:
alu_out = (oe_imm16 << 16);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
addr = rs + se_imm16;
alu_out = ReadB(addr);
break;
+ case LH:
+ addr = rs + se_imm16;
+ alu_out = ReadH(addr, instr);
+ break;
+ case LWL: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
case LW:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
@@ -1331,12 +2458,47 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
addr = rs + se_imm16;
alu_out = ReadBU(addr);
break;
+ case LHU:
+ addr = rs + se_imm16;
+ alu_out = ReadHU(addr, instr);
+ break;
+ case LWR: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
case SB:
addr = rs + se_imm16;
break;
+ case SH:
+ addr = rs + se_imm16;
+ break;
+ case SWL: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
case SW:
addr = rs + se_imm16;
break;
+ case SWR: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
case LWC1:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
@@ -1356,26 +2518,26 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
- // ------------- Branch instructions
+ // ------------- Branch instructions.
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2* Instruction::kInstrSize);
}
} else {
- next_pc = current_pc + 2 * Instruction::kInstructionSize;
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
}
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
case ADDIU:
case SLTI:
@@ -1386,18 +2548,31 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case LUI:
set_register(rt_reg, alu_out);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
set_register(rt_reg, alu_out);
break;
case SB:
WriteB(addr, static_cast<int8_t>(rt));
break;
+ case SH:
+ WriteH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case SWL:
+ WriteW(addr, mem_value, instr);
+ break;
case SW:
WriteW(addr, rt, instr);
break;
+ case SWR:
+ WriteW(addr, mem_value, instr);
+ break;
case LWC1:
set_fpu_register(ft_reg, alu_out);
break;
@@ -1410,7 +2585,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
case SDC1:
addr = rs + se_imm16;
- WriteD(addr, ft, instr);
+ WriteD(addr, get_fpu_register_double(ft_reg), instr);
break;
default:
break;
@@ -1422,7 +2597,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -1432,42 +2607,47 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
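The LWL/LWR cases above merge a partially loaded word with the existing register contents; a standalone little-endian sketch of the same arithmetic, plus the classic pairing that yields a full unaligned load (all helper names are illustrative):

#include <cstdint>
#include <cstring>

static uint32_t AlignedWord(const uint8_t* mem, uint32_t addr) {
  uint32_t word;
  memcpy(&word, mem + addr, sizeof(word));
  return word;
}

// LWR: fill the low bytes of rt from memory, keep its high bytes.
static uint32_t Lwr(const uint8_t* mem, uint32_t addr, uint32_t rt) {
  uint32_t al_offset = addr & 3;
  uint32_t byte_shift = 3 - al_offset;
  uint32_t mask = al_offset ? (~0u << (byte_shift + 1) * 8) : 0;
  return (AlignedWord(mem, addr - al_offset) >> al_offset * 8) | (rt & mask);
}

// LWL: fill the high bytes of rt from memory, keep its low bytes.
static uint32_t Lwl(const uint8_t* mem, uint32_t addr, uint32_t rt) {
  uint32_t al_offset = addr & 3;
  uint32_t byte_shift = 3 - al_offset;
  uint32_t mask = (1u << byte_shift * 8) - 1;
  return (AlignedWord(mem, addr - al_offset) << byte_shift * 8) | (rt & mask);
}

// Classic little-endian unaligned word load:  lwr rt, 0(addr); lwl rt, 3(addr).
static uint32_t LoadUnaligned(const uint8_t* mem, uint32_t addr) {
  return Lwl(mem, addr + 3, Lwr(mem, addr, 0));
}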
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xf0000000;
- // Next pc
- int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
+ // Next pc.
+ int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
- // Execute branch delay slot
+ // Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
}
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(instr));
- PrintF(" 0x%08x %s\n", instr, buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start());
}
switch (instr->InstructionType()) {
@@ -1485,7 +2665,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstructionSize);
+ Instruction::kInstrSize);
}
}
@@ -1511,7 +2691,7 @@ void Simulator::Execute() {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- Debugger dbg(this);
+ MipsDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);
@@ -1522,10 +2702,10 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
- // Setup arguments
+  // Set up arguments.
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
@@ -1538,7 +2718,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kArgsSlotsSize);
+ - kCArgsSlotsSize);
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
@@ -1550,7 +2730,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
va_end(parameters);
set_register(sp, entry_stack);
- // Prepare to execute the code at entry
+ // Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
@@ -1586,7 +2766,7 @@ int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
set_register(gp, callee_saved_value);
set_register(fp, callee_saved_value);
- // Start the simulation
+ // Start the simulation.
Execute();
// Check that the callee-saved registers have been preserved.
@@ -1643,8 +2823,8 @@ uintptr_t Simulator::PopAddress() {
#undef UNSUPPORTED
-} } // namespace assembler::mips
+} } // namespace v8::internal
-#endif // !__mips || USE_SIMULATOR
+#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 6e42683a2..69dddfad3 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,19 +37,40 @@
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
+#include "constants-mips.h"
-#if defined(__mips) && !defined(USE_SIMULATOR)
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4);
+ entry(p0, p1, p2, p3, p4)
+
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
+ void*, int*, Address, int, Isolate*);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type mips_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
return c_limit;
}
@@ -60,6 +81,8 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline void UnregisterCTryCatch() { }
};
+} } // namespace v8::internal
+
// Calculated the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
@@ -70,39 +93,51 @@ class SimulatorStack : public v8::internal::AllStatic {
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
+#include "hashmap.h"
+#include "assembler.h"
-#else // #if !defined(__mips) || defined(USE_SIMULATOR)
+namespace v8 {
+namespace internal {
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(\
- assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
- p0, p1, p2, p3, p4))
+// -----------------------------------------------------------------------------
+// Utility functions
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- assembler::mips::Simulator::current()->Call(\
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
-namespace assembler {
-namespace mips {
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
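Given the constants above, a plausible sketch of how the i-cache code can locate the validity byte for an instruction address once the owning CachePage has been found in the HashMap (presumably keyed by addr & ~kPageMask); the helper itself is illustrative only:

static bool CacheLineIsValid(CachePage* page, uintptr_t addr) {
  int offset = static_cast<int>(addr & CachePage::kPageMask);   // Offset within the 4 KB page.
  return *page->ValidityByte(offset) == CachePage::LINE_VALID;  // One validity byte per 4-byte line.
}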
class Simulator {
public:
- friend class Debugger;
+ friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
@@ -119,7 +154,7 @@ class Simulator {
sp,
s8,
ra,
- // LO, HI, and pc
+ // LO, HI, and pc.
LO,
HI,
pc, // pc must be the last register.
@@ -132,29 +167,35 @@ class Simulator {
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters
+ f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
f26, f27, f28, f29, f30, f31,
kNumFPURegisters
};
- Simulator();
+ explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current();
+ static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
// architecture specification and is off by a 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
- // Same for FPURegisters
+ // Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
+ int64_t get_fpu_register_long(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@@ -167,12 +208,12 @@ class Simulator {
void Execute();
// Call on program start.
- static void Initialize();
+ static void Initialize(Isolate* isolate);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
- int32_t Call(byte_* entry, int argument_count, ...);
+ int32_t Call(byte* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -180,6 +221,14 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -223,15 +272,35 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
-
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
+
+ // Helper function for DecodeTypeRegister.
+ void ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt);
+
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Stop helper functions.
+ bool IsWatchpoint(uint32_t code);
+ void PrintWatchpoint(uint32_t code);
+ void HandleStop(uint32_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint32_t code);
+ void EnableStop(uint32_t code);
+ void DisableStop(uint32_t code);
+ void IncreaseStopCounter(uint32_t code);
+ void PrintStopInfo(uint32_t code);
+
+
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
@@ -239,11 +308,17 @@ class Simulator {
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeField());
+ instr->OpcodeValue());
}
InstructionDecode(instr);
}
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
enum Exception {
none,
kIntegerOverflow,
@@ -258,30 +333,68 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
- bool fp_return);
+ ExternalReference::Type type);
+
+ // For use in calls that take double value arguments.
+ void GetFpArgs(double* x, double* y);
+ void GetFpArgs(double* x);
+ void GetFpArgs(double* x, int32_t* y);
+ void SetFpResult(const double& result);
- // Used for real time calls that takes two double values as arguments and
- // returns a double.
- void SetFpResult(double result);
// Architecture state.
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
// Simulator support.
+ // Allocate 1MB for stack.
+ static const size_t stack_size_ = 1 * 1024*1024;
char* stack_;
bool pc_modified_;
int icount_;
- static bool initialized_;
+ int break_count_;
+
+ // Icache simulation.
+ v8::internal::HashMap* i_cache_;
+
+ v8::internal::Isolate* isolate_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled (i.e. the simulator will stop when it reaches the
+  // instruction) if bit 31 of watched_stops[code].count is unset.
+ // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops[kMaxStopCode + 1];
};
-} } // namespace assembler::mips
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -291,21 +404,22 @@ class Simulator {
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
- static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return assembler::mips::Simulator::current()->StackLimit();
+ static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+ uintptr_t c_limit) {
+ return Simulator::current(isolate)->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+ Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- assembler::mips::Simulator::current()->PopAddress();
+ Simulator::current(Isolate::Current())->PopAddress();
}
};
-#endif // !defined(__mips) || defined(USE_SIMULATOR)
+} } // namespace v8::internal
+#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
-
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 683b8626e..3e5a0091c 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,7 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
@@ -39,6 +39,124 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+static void ProbeTable(Isolate* isolate,
+ MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset,
+ Register scratch,
+ Register scratch2) {
+ ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+ ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+ // Check the relative positions of the address fields.
+ ASSERT(value_off_addr > key_off_addr);
+ ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register offsets_base_addr = scratch;
+
+ // Check that the key in the entry matches the name.
+ __ li(offsets_base_addr, Operand(key_offset));
+ __ sll(scratch2, offset, 1);
+ __ addu(scratch2, offsets_base_addr, scratch2);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ Branch(&miss, ne, name, Operand(scratch2));
+
+ // Get the code entry from the cache.
+ __ Addu(offsets_base_addr, offsets_base_addr,
+ Operand(value_off_addr - key_off_addr));
+ __ sll(scratch2, offset, 1);
+ __ addu(scratch2, offsets_base_addr, scratch2);
+ __ lw(scratch2, MemOperand(scratch2));
+
+ // Check that the flags match what we're looking for.
+ __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+ __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+ __ Branch(&miss, ne, scratch2, Operand(flags));
+
+ // Re-load code entry from cache.
+ __ sll(offset, offset, 1);
+ __ addu(offset, offset, offsets_base_addr);
+ __ lw(offset, MemOperand(offset));
+
+ // Jump to the first instruction in the code stub.
+ __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(offset);
+
+ // Miss: fall through.
+ __ bind(&miss);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+ MacroAssembler* masm,
+ Label* miss_label,
+ Register receiver,
+ String* name,
+ Register scratch0,
+ Register scratch1) {
+ ASSERT(name->IsSymbol());
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ Label done;
+
+ const int kInterceptorOrAccessCheckNeededMask =
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+ // Bail out if the receiver has a named interceptor or requires access checks.
+ Register map = scratch1;
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+ __ Branch(miss_label, ne, at, Operand(zero_reg));
+
+ // Check that receiver is a JSObject.
+ __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Load properties array.
+ Register properties = scratch0;
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ // Check that the properties array is a dictionary.
+ __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ Register tmp = properties;
+ __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+ __ Branch(miss_label, ne, map, Operand(tmp));
+
+ // Restore the temporarily used register.
+ __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+ MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+ masm,
+ miss_label,
+ &done,
+ receiver,
+ properties,
+ name,
+ scratch1);
+ if (result->IsFailure()) return result;
+
+ __ bind(&done);
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+ return result;
+}
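// Editor's note: a hedged host-side sketch of the bail-out test used by
// GenerateDictionaryNegativeLookup above; the bit positions are passed in as
// parameters rather than restating the real Map bit-field layout.
#include <cstdint>

// The negative-lookup fast path is abandoned when the receiver's map has a
// named interceptor or requires an access check.
bool NeedsSlowNegativeLookup(uint8_t map_bit_field,
                             int has_named_interceptor_bit,
                             int is_access_check_needed_bit) {
  const uint32_t mask = (1u << has_named_interceptor_bit) |
                        (1u << is_access_check_needed_bit);
  return (map_bit_field & mask) != 0;
}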
+
+
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
@@ -46,14 +164,96 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register scratch,
Register extra,
Register extra2) {
- UNIMPLEMENTED_MIPS();
+ Isolate* isolate = masm->isolate();
+ Label miss;
+
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+ ASSERT(!extra2.is(receiver));
+ ASSERT(!extra2.is(name));
+ ASSERT(!extra2.is(scratch));
+ ASSERT(!extra2.is(extra));
+
+ // Check scratch, extra and extra2 registers are valid.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss, t0);
+
+ // Get the map of the receiver and compute the hash.
+ __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+ __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Addu(scratch, scratch, Operand(t8));
+ __ Xor(scratch, scratch, Operand(flags));
+ __ And(scratch,
+ scratch,
+ Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ Subu(scratch, scratch, Operand(name));
+ __ Addu(scratch, scratch, Operand(flags));
+ __ And(scratch,
+ scratch,
+ Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
}
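// Editor's note: a host-side sketch (not part of the patch) of the two probe
// offsets computed by GenerateProbe above. 'name_hash_field' and 'map' stand
// for the raw words loaded from the string and the receiver's map; the masks
// correspond to (kPrimaryTableSize - 1) << kHeapObjectTagSize and the
// secondary-table equivalent.
#include <cstdint>

uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map,
                       uint32_t flags, uint32_t primary_mask) {
  // scratch = name->hash_field + receiver->map, xor'ed with the code flags.
  return ((name_hash_field + map) ^ flags) & primary_mask;
}

uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name,
                         uint32_t flags, uint32_t secondary_mask) {
  // Derived from the primary probe: subtract the name word, add the flags.
  return ((primary_offset - name) + flags) & secondary_mask;
}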
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
- UNIMPLEMENTED_MIPS();
+ // Load the global or builtins object from the current context.
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ lw(prototype,
+ FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ lw(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ Isolate* isolate = masm->isolate();
+ // Check we're still in the same context.
+ __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ASSERT(!prototype.is(at));
+ __ li(at, isolate->global());
+ __ Branch(miss, ne, prototype, Operand(at));
+ // Get the global function with the given index.
+ JSFunction* function =
+ JSFunction::cast(isolate->global_context()->get(index));
+ // Load its initial map. The global functions all have initial maps.
+ __ li(prototype, Handle<Map>(function->initial_map()));
+ // Load the prototype from the initial map.
+ __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
}
@@ -63,7 +263,18 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index) {
- UNIMPLEMENTED_MIPS();
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ lw(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ lw(dst, FieldMemOperand(dst, offset));
+ }
}
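// Editor's note: sketch of the offset arithmetic in GenerateFastPropertyLoad.
// The parameters stand in for map()->inobject_properties(),
// map()->instance_size(), FixedArray::kHeaderSize and kPointerSize; offsets
// are untagged here (FieldMemOperand subtracts kHeapObjectTag in the stub).
struct FastLoadPlan {
  bool in_object;   // Load straight from the object...
  int byte_offset;  // ...or from the properties backing store, at this offset.
};

FastLoadPlan PlanFastPropertyLoad(int index, int inobject_properties,
                                  int instance_size, int fixed_array_header,
                                  int pointer_size) {
  index -= inobject_properties;  // A negative index means stored in-object.
  if (index < 0) {
    return {true, instance_size + index * pointer_size};
  }
  return {false, index * pointer_size + fixed_array_header};
}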
@@ -71,7 +282,76 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ And(scratch, receiver, Operand(kSmiTagMask));
+ __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Load length directly from the JS array.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+// Generate code to check if an object is a string. If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, smi, t0);
+
+ // Check that the object is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ And(scratch2, scratch1, Operand(kIsNotStringMask));
+ // The cast resolves the Operand overload when the expected tag value is 0.
+ __ Branch(non_string_object,
+ ne,
+ scratch2,
+ Operand(static_cast<int32_t>(kStringTag)));
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object,
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss,
+ bool support_wrappers) {
+ Label check_wrapper;
+
+ // Check if the object is a string leaving the instance type in the
+ // scratch1 register.
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+ support_wrappers ? &check_wrapper : miss);
+
+ // Load length directly from the string.
+ __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
+ __ Ret();
+
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
+ }
}
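// Editor's note: the instance-type test behind GenerateStringCheck, written
// out as a host-side sketch; kIsNotStringMask and kStringTag mirror the
// constants compared against in the generated branch.
#include <cstdint>

bool LooksLikeString(uint32_t instance_type, uint32_t is_not_string_mask,
                     uint32_t string_tag) {
  // A heap object is a string iff masking its instance type with
  // kIsNotStringMask yields kStringTag.
  return (instance_type & is_not_string_mask) == string_tag;
}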
@@ -80,11 +360,13 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+ __ mov(v0, scratch1);
+ __ Ret();
}
-// Generate StoreField code, value is passed in r0 register.
+// Generate StoreField code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -95,12 +377,652 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register name_reg,
Register scratch,
Label* miss_label) {
- UNIMPLEMENTED_MIPS();
+ // a0 : value.
+ Label exit;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+ // Check that the map of the receiver hasn't changed.
+ __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ push(receiver_reg);
+ __ li(a2, Operand(Handle<Map>(transition)));
+ __ Push(a2, a0);
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+ masm->isolate()),
+ 3, 1);
+ return;
+ }
+
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ li(t0, Operand(Handle<Map>(transition)));
+ __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ }
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ sw(a0, FieldMemOperand(receiver_reg, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(a0, &exit, scratch);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array.
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sw(a0, FieldMemOperand(scratch, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(a0, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ }
+
+ // Return the value (register v0).
+ __ bind(&exit);
+ __ mov(v0, a0);
+ __ Ret();
}
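// Editor's note: a small sketch of the two store-path decisions made above,
// under the same conditions the stub tests; not a reimplementation. A map
// transition with no spare property slots must go to the runtime so the
// properties array can be extended first, and write barriers are skipped for
// smis because a smi is never a heap pointer.
bool NeedsRuntimeExtend(bool has_transition, int unused_property_fields) {
  return has_transition && unused_property_fields == 0;
}

bool NeedsWriteBarrier(bool value_is_smi) {
  return !value_is_smi;
}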
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+ } else {
+ code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss,
+ Code::ExtraICState extra_ic_state) {
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- a1: function to call
+ // -----------------------------------
+ // Check that the function really is a function.
+ __ JumpIfSmi(a1, miss);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+ Register scratch = name;
+ __ li(scratch, Operand(Handle<Object>(interceptor)));
+ __ Push(scratch, receiver, holder);
+ __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+ masm->isolate());
+ __ li(a0, Operand(5));
+ __ li(a1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+static const int kFastApiCallArguments = 3;
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+ Register scratch) {
+ ASSERT(Smi::FromInt(0) == 0);
+ for (int i = 0; i < kFastApiCallArguments; i++) {
+ __ push(zero_reg);
+ }
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+ __ Drop(kFastApiCallArguments);
+}
+
+
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : holder (set by CheckPrototypes)
+ // -- sp[4] : callee js function
+ // -- sp[8] : call data
+ // -- sp[12] : last js argument
+ // -- ...
+ // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 4) * 4] : receiver
+ // -----------------------------------
+ // Get the function and setup the context.
+ JSFunction* function = optimization.constant_function();
+ __ li(t1, Operand(Handle<JSFunction>(function)));
+ __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+
+ // Pass the additional arguments FastHandleApiCall expects.
+ Object* call_data = optimization.api_call_info()->data();
+ Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
+ __ li(a0, api_call_info_handle);
+ __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+ } else {
+ __ li(t2, Operand(Handle<Object>(call_data)));
+ }
+
+ // Store js function and call data.
+ __ sw(t1, MemOperand(sp, 1 * kPointerSize));
+ __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+
+ // a2 points to call data as expected by Arguments
+ // (refer to layout above).
+ __ Addu(a2, sp, Operand(2 * kPointerSize));
+
+ Object* callback = optimization.api_call_info()->callback();
+ Address api_function_address = v8::ToCData<Address>(callback);
+ ApiFunction fun(api_function_address);
+
+ const int kApiStackSpace = 4;
+
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+ // struct from the function (which is currently the case). This means we pass
+ // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+ // will handle setting up a0.
+
+ // a1 = v8::Arguments&
+ // Arguments is built at sp + 1 (sp is a reserved spot for ra).
+ __ Addu(a1, sp, kPointerSize);
+
+ // v8::Arguments::implicit_args = data
+ __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+ // v8::Arguments::values = last argument
+ __ Addu(t0, a2, Operand(argc * kPointerSize));
+ __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+ // v8::Arguments::length_ = argc
+ __ li(t0, Operand(argc));
+ __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+ // v8::Arguments::is_construct_call = 0
+ __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+ ExternalReference ref =
+ ExternalReference(&fun,
+ ExternalReference::DIRECT_API_CALL,
+ masm->isolate());
+ return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+}
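// Editor's note: illustrative layout of the four words the code above writes
// for the v8::Arguments block at a1 (sp + kPointerSize). This struct is a
// sketch of that layout, not the real v8::Arguments definition.
#include <cstdint>

struct ArgumentsBlockSketch {
  void* implicit_args;        // Points at the call data area (register a2).
  void* values;               // Address of the last JS argument.
  int32_t length;             // argc.
  int32_t is_construct_call;  // Always 0 for these stubs.
};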
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name,
+ Code::ExtraICState extra_ic_state)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
+
+ MaybeObject* Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ CallOptimization optimization(lookup);
+
+ if (optimization.is_constant_call()) {
+ return CompileCacheable(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ holder,
+ lookup,
+ name,
+ optimization,
+ miss);
+ } else {
+ CompileRegular(masm,
+ object,
+ receiver,
+ scratch1,
+ scratch2,
+ scratch3,
+ name,
+ holder,
+ miss);
+ return masm->isolate()->heap()->undefined_value();
+ }
+ }
+
+ private:
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
+ ASSERT(optimization.is_constant_call());
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ Counters* counters = masm->isolate()->counters();
+
+ int depth1 = kInvalidProtoDepth;
+ int depth2 = kInvalidProtoDepth;
+ bool can_do_fast_api_call = false;
+ if (optimization.is_simple_api_call() &&
+ !lookup->holder()->IsGlobalObject()) {
+ depth1 =
+ optimization.GetPrototypeDepthOfExpectedType(object,
+ interceptor_holder);
+ if (depth1 == kInvalidProtoDepth) {
+ depth2 =
+ optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+ lookup->holder());
+ }
+ can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+ (depth2 != kInvalidProtoDepth);
+ }
+
+ __ IncrementCounter(counters->call_const_interceptor(), 1,
+ scratch1, scratch2);
+
+ if (can_do_fast_api_call) {
+ __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
+ scratch1, scratch2);
+ ReserveSpaceForFastApiCall(masm, scratch1);
+ }
+
+ // Check that the maps from receiver to interceptor's holder
+ // haven't changed and thus we can invoke interceptor.
+ Label miss_cleanup;
+ Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, scratch3, name, depth1, miss);
+
+ // Invoke an interceptor and if it provides a value,
+ // branch to |regular_invoke|.
+ Label regular_invoke;
+ LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
+ &regular_invoke);
+
+ // Interceptor returned nothing for this property. Try to use cached
+ // constant function.
+
+ // Check that the maps from interceptor's holder to constant function's
+ // holder haven't changed and thus we can use cached constant function.
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, scratch3, name, depth2, miss);
+ } else {
+ // CheckPrototypes has a side effect of fetching a 'holder'
+ // for API (object which is instanceof for the signature). It's
+ // safe to omit it here, as if present, it should be fetched
+ // by the previous CheckPrototypes.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
+
+ // Invoke function.
+ if (can_do_fast_api_call) {
+ MaybeObject* result = GenerateFastApiDirectCall(masm,
+ optimization,
+ arguments_.immediate());
+ if (result->IsFailure()) return result;
+ } else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(optimization.constant_function(), arguments_,
+ JUMP_FUNCTION, call_kind);
+ }
+
+ // Deferred code for the fast API call case: clean up the preallocated space.
+ if (can_do_fast_api_call) {
+ __ bind(&miss_cleanup);
+ FreeSpaceForFastApiCall(masm);
+ __ Branch(miss_label);
+ }
+
+ // Invoke a regular function.
+ __ bind(&regular_invoke);
+ if (can_do_fast_api_call) {
+ FreeSpaceForFastApiCall(masm);
+ }
+
+ return masm->isolate()->heap()->undefined_value();
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ String* name,
+ JSObject* interceptor_holder,
+ Label* miss_label) {
+ Register holder =
+ stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3, name,
+ miss_label);
+
+ // Call a runtime function to load the interceptor property.
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ interceptor_holder);
+
+ __ CallExternalReference(
+ ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+ masm->isolate()),
+ 5);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Register scratch,
+ Label* interceptor_succeeded) {
+ __ EnterInternalFrame();
+
+ __ Push(holder, name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // If interceptor returns no-result sentinel, call the constant function.
+ __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
+ }
+
+ StubCompiler* stub_compiler_;
+ const ParameterCount& arguments_;
+ Register name_;
+ Code::ExtraICState extra_ic_state_;
+};
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+ MacroAssembler* masm,
+ GlobalObject* global,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ Object* probe;
+ { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+ if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+ }
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+ ASSERT(cell->value()->IsTheHole());
+ __ li(scratch, Operand(Handle<Object>(cell)));
+ __ lw(scratch,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(miss, ne, scratch, Operand(at));
+ return cell;
+}
+
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ Register scratch,
+ Label* miss) {
+ JSObject* current = object;
+ while (current != holder) {
+ if (current->IsGlobalObject()) {
+ // Returns a cell or a failure.
+ MaybeObject* result = GenerateCheckPropertyCell(
+ masm,
+ GlobalObject::cast(current),
+ name,
+ scratch,
+ miss);
+ if (result->IsFailure()) return result;
+ }
+ ASSERT(current->IsJSObject());
+ current = JSObject::cast(current->GetPrototype());
+ }
+ return NULL;
+}
+
+
+// Convert the int passed in register ival to an IEEE 754 single-precision
+// value and store it at memory location (dst + 4 * wordoffset).
+// If an FPU is available, use it for the conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(ival, f0);
+ __ cvt_s_w(f0, f0);
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ swc1(f0, MemOperand(scratch1, 0));
+ } else {
+ // FPU is not available, do manual conversions.
+
+ Label not_special, done;
+ // Move the sign bit from the source to the destination. This works because
+ // bit 31 is both the IEEE 754 binary32 sign bit and the 2's complement sign
+ // bit of a 32-bit integer.
+ ASSERT(kBinary32SignMask == 0x80000000u);
+
+ __ And(fval, ival, Operand(kBinary32SignMask));
+ // Negate value if it is negative.
+ __ subu(scratch1, zero_reg, ival);
+ __ movn(ival, scratch1, fval);
+
+ // We have -1, 0 or 1, which we treat specially. Register ival contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ Branch(&not_special, gt, ival, Operand(1));
+
+ // For 1 or -1 we need to or in the 0 exponent (biased).
+ static const uint32_t exponent_word_for_1 =
+ kBinary32ExponentBias << kBinary32ExponentShift;
+
+ __ Xor(scratch1, ival, Operand(1));
+ __ li(scratch2, exponent_word_for_1);
+ __ or_(scratch2, fval, scratch2);
+ __ movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
+ __ Branch(&done);
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ Register zeros = scratch2;
+ __ clz(zeros, ival);
+
+ // Compute exponent and or it into the exponent register.
+ __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
+ __ subu(scratch1, scratch1, zeros);
+
+ __ sll(scratch1, scratch1, kBinary32ExponentShift);
+ __ or_(fval, fval, scratch1);
+
+ // Shift up the source chopping the top bit off.
+ __ Addu(zeros, zeros, Operand(1));
+ // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+ __ sllv(ival, ival, zeros);
+ // Or the mantissa (the top kBinary32MantissaBits of the shifted value) into fval.
+ __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
+ __ or_(fval, fval, scratch1);
+
+ __ bind(&done);
+
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ sw(fval, MemOperand(scratch1, 0));
+ }
+}
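// Editor's note: a host-side reference for the software path above, not part
// of the patch. Both the FPU path (cvt.s.w) and the manual bit construction
// are meant to produce the bits of static_cast<float>(ival); this helper is
// the reference, not the stub.
#include <cstdint>
#include <cstring>

uint32_t IntToBinary32Bits(int32_t ival) {
  float f = static_cast<float>(ival);  // Rounds like cvt.s.w.
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;  // sign | biased exponent | top 23 mantissa bits.
}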
+
+
+// Convert an unsigned integer with the specified number of leading zeroes in
+// its binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+
+ __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
+ if (mantissa_shift_for_hi_word > 0) {
+ __ sll(loword, hiword, mantissa_shift_for_lo_word);
+ __ srl(hiword, hiword, mantissa_shift_for_hi_word);
+ __ or_(hiword, scratch, hiword);
+ } else {
+ __ mov(loword, zero_reg);
+ __ sll(hiword, hiword, mantissa_shift_for_hi_word);
+ __ or_(hiword, scratch, hiword);
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it has been
+ // corrupted by the most significant bit of the mantissa, so fix it up.
+ if (!(biased_exponent & 1)) {
+ __ li(scratch, 1 << HeapNumber::kExponentShift);
+ __ nor(scratch, scratch, scratch);
+ __ and_(hiword, hiword, scratch);
+ }
}
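// Editor's note: reference sketch for GenerateUInt2Double, not part of the
// patch. For a non-zero uint32 the routine above derives the double's hi:lo
// words from the leading-zero count; a host can obtain the same words with a
// plain conversion. As noted above, the assembly path is undefined for 0.
#include <cstdint>
#include <cstring>

void UInt2DoubleWords(uint32_t value, uint32_t* hiword, uint32_t* loword) {
  double d = static_cast<double>(value);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *hiword = static_cast<uint32_t>(bits >> 32);  // Sign, exponent, top mantissa.
  *loword = static_cast<uint32_t>(bits);        // Low 32 mantissa bits.
}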
@@ -108,15 +1030,163 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#define __ ACCESS_MASM(masm())
+Register StubCompiler::CheckPrototypes(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ int save_at_depth,
+ Label* miss) {
+ // Make sure there's no overlap between holder and object registers.
+ ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+ ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+ && !scratch2.is(scratch1));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+ __ sw(reg, MemOperand(sp));
+ }
+
+ // Check the maps in the prototype chain.
+ // Traverse the prototype chain from the object and do map checks.
+ JSObject* current = object;
+ while (current != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+ ASSERT(current->GetPrototype()->IsJSObject());
+ JSObject* prototype = JSObject::cast(current->GetPrototype());
+ if (!current->HasFastProperties() &&
+ !current->IsJSGlobalObject() &&
+ !current->IsJSGlobalProxy()) {
+ if (!name->IsSymbol()) {
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+ Object* lookup_result = NULL; // Initialization to please compiler.
+ if (!maybe_lookup_result->ToObject(&lookup_result)) {
+ set_failure(Failure::cast(maybe_lookup_result));
+ return reg;
+ }
+ name = String::cast(lookup_result);
+ }
+ ASSERT(current->property_dictionary()->FindEntry(name) ==
+ StringDictionary::kNotFound);
+
+ MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+ miss,
+ reg,
+ name,
+ scratch1,
+ scratch2);
+ if (negative_lookup->IsFailure()) {
+ set_failure(Failure::cast(negative_lookup));
+ return reg;
+ }
+
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ reg = holder_reg; // From now the object is in holder_reg.
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else if (heap()->InNewSpace(prototype)) {
+ // Get the map of the current object.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+ // Branch on the result of the map check.
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ // Restore scratch register to be the map of the object. In the
+ // new space case below, we load the prototype from the map in
+ // the scratch register.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+ reg = holder_reg; // From now the object is in holder_reg.
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ } else {
+ // Check the map of the current object.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ // Branch on the result of the map check.
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (current->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+ // The prototype is in old space; load it directly.
+ reg = holder_reg; // From now the object is in holder_reg.
+ __ li(reg, Operand(Handle<JSObject>(prototype)));
+ }
+
+ if (save_at_depth == depth) {
+ __ sw(reg, MemOperand(sp));
+ }
+
+ // Go to the next object in the prototype chain.
+ current = prototype;
+ }
+
+ // Check the holder map.
+ __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+ // Log the check depth.
+ LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+ // Perform security check for access to the global object.
+ ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ if (holder->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ }
+
+ // If we've skipped any global objects, it's not enough to verify
+ // that their maps haven't changed. We also need to check that the
+ // property cell for the property is still empty.
+
+ MaybeObject* result = GenerateCheckPropertyCells(masm(),
+ object,
+ holder,
+ name,
+ scratch1,
+ miss);
+ if (result->IsFailure()) set_failure(Failure::cast(result));
+
+ // Return the register containing the holder.
+ return reg;
+}
+
+
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ And(scratch1, receiver, Operand(kSmiTagMask));
+ __ Branch(miss, eq, scratch1, Operand(zero_reg));
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
+ GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+ __ Ret();
}
@@ -125,289 +1195,3048 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss, scratch1);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, scratch3, name, miss);
+
+ // Return the constant value.
+ __ li(v0, Operand(Handle<Object>(value)));
+ __ Ret();
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x470);
- return false; // UNIMPLEMENTED RETURN
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss, scratch1);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+ name, miss);
+
+ // Build AccessorInfo::args_ list on the stack and push property name below
+ // the exit frame to make GC aware of them and store pointers to them.
+ __ push(receiver);
+ __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ Handle<AccessorInfo> callback_handle(callback);
+ if (heap()->InNewSpace(callback_handle->data())) {
+ __ li(scratch3, callback_handle);
+ __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ } else {
+ __ li(scratch3, Handle<Object>(callback_handle->data()));
+ }
+ __ Push(reg, scratch3, name_reg);
+ __ mov(a2, scratch2); // Saved in case scratch2 == a1.
+ __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
+
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+
+ // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+ // struct from the function (which is currently the case). This means we pass
+ // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
+ // will handle setting up a0.
+
+ const int kApiStackSpace = 1;
+
+ __ EnterExitFrame(false, kApiStackSpace);
+ // Create AccessorInfo instance on the stack above the exit frame with
+ // scratch2 (internal::Object **args_) as the data.
+ __ sw(a2, MemOperand(sp, kPointerSize));
+ // a2 (second argument - see note above) = AccessorInfo&
+ __ Addu(a2, sp, kPointerSize);
+
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ ExternalReference ref =
+ ExternalReference(&fun,
+ ExternalReference::DIRECT_GETTER_CALL,
+ masm()->isolate());
+ // 4 args - will be freed later by LeaveExitFrame.
+ return masm()->TryCallApiFunctionAndReturn(ref, 4);
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x505);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+ // So far, the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added
+ // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from interceptor's holder to lookup's holder
+ // haven't changed. And load lookup's holder into |holder| register.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ scratch3,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ GenerateFastPropertyLoad(masm(), v0, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ li(scratch2, Handle<AccessorInfo>(callback));
+ // holder_reg is either receiver or scratch1.
+ if (!receiver.is(holder_reg)) {
+ ASSERT(scratch1.is(holder_reg));
+ __ Push(receiver, holder_reg);
+ __ lw(scratch3,
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+ __ Push(scratch3, scratch2, name_reg);
+ } else {
+ __ push(receiver);
+ __ lw(scratch3,
+ FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+ __ Push(holder_reg, scratch3, scratch2, name_reg);
+ }
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, scratch3,
+ name, miss);
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+}
+
+
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+ if (kind_ == Code::KEYED_CALL_IC) {
+ __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+ }
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Registers:
- // a1: function
- // ra: return address
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+ JSObject* holder,
+ String* name,
+ Label* miss) {
+ ASSERT(holder->IsGlobalObject());
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
- // Enter an internal frame.
- __ EnterInternalFrame();
- // Preserve the function.
- __ Push(a1);
- // Setup aligned call.
- __ SetupAlignedCall(t0, 1);
- // Push the function on the stack as the argument to the runtime function.
- __ Push(a1);
- // Call the runtime function
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ ReturnFromAlignedCall();
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore saved function.
- __ Pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
- // Do a tail-call of the compiled function.
- __ Jump(t9);
+ // Get the receiver from the stack.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
- return GetCodeWithFlags(flags, "LazyCompileStub");
+ // If the object is the holder then we know that it's a global
+ // object which can only happen for contextual calls. In this case,
+ // the receiver cannot be a smi.
+ if (object != holder) {
+ __ JumpIfSmi(a0, miss);
+ }
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
}
-Object* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ Label* miss) {
+ // Get the value from the cell.
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check that the cell contains the same function.
+ if (heap()->InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it still is a function.
+ __ JumpIfSmi(a1, miss);
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Branch(miss, ne, t0, Operand(a3));
+ } else {
+ __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+ }
}
-Object* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
+ MaybeObject* maybe_obj =
+ isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+ kind_,
+ extra_ic_state_);
+ Object* obj;
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+ return obj;
}
-Object* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ const int argc = arguments().immediate();
+
+ // Get the receiver of the function from the stack into a0.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a0, &miss, t0);
+
+ // Do the right check and compute the holder register.
+ Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+ GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
}
-Object* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ Register receiver = a1;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), receiver,
+ holder, a3, v0, t0, name, &miss);
+
+ if (argc == 0) {
+ // Nothing to do, just return the length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+ } else {
+ Label call_builtin;
+
+ Register elements = a3;
+ Register end_elements = t1;
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ if (argc == 1) { // Otherwise fall through to call the builtin.
+ Label exit, with_write_barrier, attempt_to_grow_elements;
+
+ // Get the array's length into v0 and calculate new length.
+ __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
+
+ // Get the element's length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ const int kEndElementsOffset =
+ FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
+ __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+ __ Addu(end_elements, end_elements, kPointerSize);
+
+ // Check for a smi.
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+ __ bind(&exit);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&with_write_barrier);
+ __ InNewSpace(elements, t0, eq, &exit);
+ __ RecordWriteHelper(elements, end_elements, t0);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&attempt_to_grow_elements);
+ // v0: array's length + 1.
+ // t0: elements' length.
+
+ if (!FLAG_inline_new) {
+ __ Branch(&call_builtin);
+ }
+
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(
+ masm()->isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(
+ masm()->isolate());
+
+ const int kAllocationDelta = 4;
+ // Load top and check if it is the end of elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+ __ li(t3, Operand(new_space_allocation_top));
+ __ lw(t2, MemOperand(t3));
+ __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+ __ li(t5, Operand(new_space_allocation_limit));
+ __ lw(t5, MemOperand(t5));
+ __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+ __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+ // We fit and could grow elements.
+ // Update new_space_allocation_top.
+ __ sw(t2, MemOperand(t3));
+ // Push the argument.
+ __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ sw(t2, MemOperand(end_elements));
+ // Fill the rest with holes.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ for (int i = 1; i < kAllocationDelta; i++) {
+ __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+ }
+
+ // Update elements' and array's sizes.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+ __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Elements are in new space, so write barrier is not required.
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
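// Editor's note: sketch of the single-argument push fast-path decision above.
// Lengths are plain element counts here, whereas the stub compares smis; the
// three outcomes mirror the code: store in place, grow the elements array by
// kAllocationDelta when it ends exactly at the new-space allocation top, or
// fall back to the ArrayPush builtin.
enum class PushPath { kStoreInPlace, kGrowInNewSpace, kCallBuiltin };

PushPath ChoosePushPath(int new_length, int capacity,
                        bool elements_end_at_allocation_top,
                        bool allocation_delta_fits_under_limit) {
  if (new_length <= capacity) return PushPath::kStoreInPlace;
  if (elements_end_at_allocation_top && allocation_delta_fits_under_limit) {
    return PushPath::kGrowInNewSpace;
  }
  return PushPath::kCallBuiltin;
}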
-Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x782);
- return GetCode(INTERCEPTOR, name);
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not an array, bail out to regular call.
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+ Label miss, return_undefined, call_builtin;
+
+ Register receiver = a1;
+ Register elements = a3;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object),
+ receiver, holder, elements, t0, v0, name, &miss);
+
+ // Get the elements array of the object.
+ __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ v0,
+ Heap::kFixedArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into t0 and calculate new length.
+ __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+ __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+ // Get the last element.
+ __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ // We can't address the last element in one operation. Compute the more
+ // expensive shift first, and use an offset later on.
+ __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(elements, elements, t1);
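+  // elements plus (FixedArray::kHeaderSize - kHeapObjectTag) now addresses
+  // the last element; it is loaded here and overwritten with the hole below.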
+ __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+ // Set the array's length.
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Fill with the hole.
+ __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&return_undefined);
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&call_builtin);
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+ masm()->isolate()),
+ argc + 1,
+ 1);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = a1;
+ Register index = t1;
+ Register scratch = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kNanValueRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // If object is not a string, bail out to regular call.
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label name_miss;
+ Label index_out_of_range;
+ Label* index_out_of_range_label = &index_out_of_range;
+
+ if (kind_ == Code::CALL_IC &&
+ (CallICBase::StringStubState::decode(extra_ic_state_) ==
+ DEFAULT_STRING_STUB)) {
+ index_out_of_range_label = &miss;
+ }
+
+ GenerateNameCheck(name, &name_miss);
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ v0,
+ &miss);
+ ASSERT(object != holder);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+ a1, a3, t0, name, &miss);
+
+ Register receiver = v0;
+ Register index = t1;
+ Register scratch1 = a1;
+ Register scratch2 = a3;
+ Register result = v0;
+ __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+ if (argc > 0) {
+ __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+ } else {
+ __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ index_out_of_range_label,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ if (index_out_of_range.is_linked()) {
+ __ bind(&index_out_of_range);
+ __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ Drop(argc + 1);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ // Restore function name in a2.
+ __ li(a2, Handle<String>(name));
+ __ bind(&name_miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
- String* name) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x906);
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the char code argument.
+ Register code = a1;
+ __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+ // Check the code is a smi.
+ Label slow;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(code, &slow);
+
+ // Convert the smi code to uint16.
+ __ And(code, code, Operand(Smi::FromInt(0xffff)));
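+  // Masking with the smi-encoded 0xffff truncates the char code to 16 bits
+  // while keeping the value tagged as a smi.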
+
+ StringCharFromCodeGenerator char_from_code_generator(code, v0);
+ char_from_code_generator.GenerateFast(masm());
+ __ Drop(argc + 1);
+ __ Ret();
+
+ StubRuntimeCallHelper call_helper;
+ char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+  if (!CpuFeatures::IsSupported(FPU)) {
+    return heap()->undefined_value();
+  }
+  CpuFeatures::Scope scope_fpu(FPU);
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss, slow;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // If the argument is a smi, just return.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+ __ Ret(eq, t0, Operand(zero_reg));
+
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+ Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+ // If fpu is enabled, we use the floor instruction.
+
+ // Load the HeapNumber value.
+ __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+ // Backup FCSR.
+ __ cfc1(a3, FCSR);
+ // Clearing FCSR clears the exception mask with no side-effects.
+ __ ctc1(zero_reg, FCSR);
+ // Convert the argument to an integer.
+ __ floor_w_d(f0, f0);
+
+ // Start checking for special cases.
+ // Get the argument exponent and clear the sign bit.
+ __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+ __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+ __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
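+  // t2 now holds the biased exponent of the argument.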
+
+ // Retrieve FCSR and check for fpu errors.
+ __ cfc1(t5, FCSR);
+ __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
+ __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+ // Check for NaN, Infinity, and -Infinity.
+  // They are invariant under Math.floor, so just
+  // return the original argument.
+ __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+ >> HeapNumber::kMantissaBitsInTopWord));
+ __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+ // We had an overflow or underflow in the conversion. Check if we
+ // have a big exponent.
+  // If greater or equal, the argument is already an integer and is in v0.
+ __ Branch(&restore_fcsr_and_return, ge, t3,
+ Operand(HeapNumber::kMantissaBits));
+ __ Branch(&wont_fit_smi);
+
+ __ bind(&no_fpu_error);
+ // Move the result back to v0.
+ __ mfc1(v0, f0);
+ // Check if the result fits into a smi.
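+  // Adding 0x40000000 (2^30) overflows to a negative value exactly when the
+  // result lies outside the signed 31-bit smi range.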
+ __ Addu(a1, v0, Operand(0x40000000));
+ __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
+ // Tag the result.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ sll(v0, v0, kSmiTagSize);
+
+ // Check for -0.
+ __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+ // t1 already holds the HeapNumber exponent.
+ __ And(t0, t1, Operand(HeapNumber::kSignMask));
+  // If the HeapNumber is negative it was -0, so reload the original
+  // HeapNumber from the stack and return it.
+  // Else v0 already holds the smi 0, so we can also just return.
+ __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ __ bind(&restore_fcsr_and_return);
+ // Restore FCSR and return.
+ __ ctc1(a3, FCSR);
+
+ __ Drop(argc + 1);
+ __ Ret();
+
+ __ bind(&wont_fit_smi);
+ // Restore FCSR and fall to slow case.
+ __ ctc1(a3, FCSR);
+
+ __ bind(&slow);
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* obj = GenerateMissBranch();
+ if (obj->IsFailure()) return obj;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : function name
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ // If the object is not a JSObject or we got an unexpected number of
+ // arguments, bail out to the regular call.
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+ Label miss;
+ GenerateNameCheck(name, &miss);
+
+ if (cell == NULL) {
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfSmi(a1, &miss);
+
+ CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+ &miss);
+ } else {
+ ASSERT(cell->value() == function);
+ GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+ }
+
+ // Load the (only) argument into v0.
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+ // Check if the argument is a smi.
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(v0, &not_smi);
+
+ // Do bitwise not or do nothing depending on the sign of the
+ // argument.
+ __ sra(t0, v0, kBitsPerInt - 1);
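+  // t0 is 0 for a non-negative smi and -1 (all ones) for a negative one, so
+  // the xor/subtract pair below yields the absolute value.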
+ __ Xor(a1, v0, t0);
+
+ // Add 1 or do nothing depending on the sign of the argument.
+ __ Subu(v0, a1, t0);
+
+ // If the result is still negative, go to the slow case.
+ // This only happens for the most negative smi.
+ Label slow;
+ __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+ // Smi case done.
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Check if the argument is a heap number and load its exponent and
+ // sign.
+ __ bind(&not_smi);
+ __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+ __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+ // Check the sign of the argument. If the argument is positive,
+ // just return it.
+ Label negative_sign;
+ __ And(t0, a1, Operand(HeapNumber::kSignMask));
+ __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // If the argument is negative, clear the sign, and return a new
+ // number.
+ __ bind(&negative_sign);
+ __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+ __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+ __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Drop(argc + 1);
+ __ Ret();
+
+ // Tail call the full function. We do not have to patch the receiver
+ // because the function makes no use of it.
+ __ bind(&slow);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+
+ __ bind(&miss);
+ // a2: function name.
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
-Object* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ Counters* counters = isolate()->counters();
+
+ ASSERT(optimization.is_simple_api_call());
+  // Bail out if the object is a global object, as we don't want to
+  // repatch it to the global receiver.
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
+ if (!object->IsJSObject()) return heap()->undefined_value();
+ int depth = optimization.GetPrototypeDepthOfExpectedType(
+ JSObject::cast(object), holder);
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+ Label miss, miss_before_stack_reserved;
+
+ GenerateNameCheck(name, &miss_before_stack_reserved);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+ __ IncrementCounter(counters->call_const(), 1, a0, a3);
+ __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+ ReserveSpaceForFastApiCall(masm(), a0);
+
+ // Check that the maps haven't changed and find a Holder as a side effect.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ depth, &miss);
+
+ MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+ if (result->IsFailure()) return result;
+
+ __ bind(&miss);
+ FreeSpaceForFastApiCall(masm());
+
+ __ bind(&miss_before_stack_reserved);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, NULL, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ if (check != NUMBER_CHECK) {
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t1, Operand(zero_reg));
+ }
+
+ // Make sure that it's okay not to patch the on stack receiver
+ // unless we're doing a receiver map check.
+ ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+ SharedFunctionInfo* function_info = function->shared();
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+ 1, a0, a3);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+ &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+ break;
+
+ case STRING_CHECK:
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+        // Check that the object is a string or a symbol.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+
+ case NUMBER_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ And(t1, a1, Operand(kSmiTagMask));
+ __ Branch(&fast, eq, t1, Operand(zero_reg));
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ case BOOLEAN_CHECK: {
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+ a1, t0, name, &miss);
+ }
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(function);
}
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+ CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
+ MaybeObject* result = compiler.Compile(masm(),
+ object,
+ holder,
+ name,
+ &lookup,
+ a1,
+ a3,
+ t0,
+ a0,
+ &miss);
+ if (result->IsFailure()) {
+ return result;
+ }
+
+ // Move returned value, the function to call, to a1.
+ __ mov(a1, v0);
+ // Restore receiver.
+ __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+
+ if (HasCustomCallGenerator(function)) {
+ MaybeObject* maybe_result = CompileCustomCall(
+ object, holder, cell, function, name);
+ Object* result;
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // Undefined means bail out to regular compiler.
+ if (!result->IsUndefined()) return result;
+ }
+
+ Label miss;
+
+ GenerateNameCheck(name, &miss);
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ GenerateGlobalReceiverCheck(object, holder, name, &miss);
+ GenerateLoadFunctionFromCell(cell, function, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a3, MemOperand(sp, argc * kPointerSize));
+ }
+
+  // Set up the context (function already in a1).
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+ JUMP_FUNCTION, call_kind);
+ }
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+ MaybeObject* maybe_result = GenerateMissBranch();
+ if (maybe_result->IsFailure()) return maybe_result;
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Name register might be clobbered.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a1, a2, a3,
+ &miss);
+ __ bind(&miss);
+ __ li(a2, Operand(Handle<String>(name))); // Restore name.
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+  // The stub is never generated for non-global objects that require access
+  // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ push(a1); // Receiver.
+ __ li(a3, Operand(Handle<AccessorInfo>(callback))); // Callback info.
+ __ Push(a3, a2, a0);
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_callback_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(a1, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(a1, a3, &miss);
+ }
+
+ // Stub is never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ Push(a1, a2, a0); // Receiver, name, value.
+
+ __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+ __ push(a0); // Strict mode.
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+ masm()->isolate());
+ __ TailCallExternalReference(store_ic_property, 4, 1);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the global has not changed.
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&miss, eq, t1, Operand(t2));
+
+ // Store the value in the cell.
+ __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+ __ mov(v0, a0); // Stored value must be returned in v0.
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+ __ Ret();
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+ Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+ JSObject* object,
+ JSObject* last) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the receiver is not a smi.
+ __ JumpIfSmi(a0, &miss);
+
+ // Check the maps of the full prototype chain.
+ CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (last->IsGlobalObject()) {
+ MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+ GlobalObject::cast(last),
+ name,
+ a1,
+ &miss);
+ if (cell->IsFailure()) {
+ miss.Unuse();
+ return cell;
+ }
+ }
+
+  // Return undefined if the maps of the full prototype chain are unchanged.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NONEXISTENT, heap()->empty_string());
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
- int index) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ __ mov(v0, a0);
+
+ GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+ callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
JSObject* holder,
- Object* value) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ Object* value,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(object,
+ holder,
+ &lookup,
+ a0,
+ a2,
+ a3,
+ a1,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ // ----------- S t a t e -------------
+ // -- a0 : receiver
+ // -- a2 : name
+ // -- ra : return address
+ // -----------------------------------
+ Label miss;
+
+ // If the object is the holder then we know that it's a global
+ // object which can only happen for contextual calls. In this case,
+ // the receiver cannot be a smi.
+ if (object != holder) {
+ __ And(t0, a0, Operand(kSmiTagMask));
+ __ Branch(&miss, eq, t0, Operand(zero_reg));
+ }
+
+ // Check that the map of the global has not changed.
+ CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+ // Get the value from the cell.
+ __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+ // Check for deleted property if property can actually be deleted.
+ if (!is_dont_delete) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss, eq, t0, Operand(at));
+ }
+
+ __ mov(v0, t0);
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ Ret();
+
+ __ bind(&miss);
+ __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
}
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(FIELD, name);
}
-// TODO(1224671): implement the fast case.
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+ t0, callback, name, &miss);
+ if (result->IsFailure()) {
+ miss.Unuse();
+ return result;
+ }
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
}
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
}
-Object* ConstructStubCompiler::CompileConstructStub(
- SharedFunctionInfo* shared) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ a1,
+ a0,
+ a2,
+ a3,
+ t0,
+ name,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(INTERCEPTOR, name);
}
-Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadArrayLength(masm(), a1, a2, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
}
-Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ // Check the key is the cached one.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+ // Check the name hasn't changed.
+ __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+ GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a1,
+ a2,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a1, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label miss;
+
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+ // Check that the name has not changed.
+ __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+ // a3 is used as scratch register. a1 and a2 keep their values if a jump to
+ // the miss label is generated.
+ GenerateStoreField(masm(),
+ object,
+ index,
+ transition,
+ a2, a1, a3,
+ &miss);
+ __ bind(&miss);
+
+ __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+ Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ Code* stub;
+ MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
+ if (!maybe_stub->To(&stub)) return maybe_stub;
+ __ DispatchMap(a2,
+ a3,
+ Handle<Map>(receiver_map),
+ Handle<Code>(stub),
+ DO_SMI_CHECK);
+
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+ MapList* receiver_maps,
+ CodeList* handler_ics) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -----------------------------------
+ Label miss;
+ __ JumpIfSmi(a2, &miss);
+
+ int receiver_count = receiver_maps->length();
+ __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ for (int current = 0; current < receiver_count; ++current) {
+ Handle<Map> map(receiver_maps->at(current));
+ Handle<Code> code(handler_ics->at(current));
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ }
+
+ __ bind(&miss);
+ Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+ // a0 : argc
+ // a1 : constructor
+ // ra : return address
+ // [sp] : last argument
+ Label generic_stub_call;
+
+ // Use t7 for holding undefined which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function and thereby hits the break points.
+ __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ // t7: undefined
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+#ifdef DEBUG
+ // Cannot construct functions this way.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Check(ne, "Function constructed by construct stub.",
+ a3, Operand(JS_FUNCTION_TYPE));
+#endif
+
+ // Now allocate the JSObject in new space.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // t7: undefined
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3,
+ t4,
+ t5,
+ t6,
+ &generic_stub_call,
+ SIZE_IN_WORDS);
+
+  // The JSObject has been allocated; now initialize the fields. The map is
+  // set to the initial map, and properties and elements are set to the empty
+  // fixed array.
+ // a0: argc
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t7: undefined
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Calculate the location of the first argument. The stack contains only the
+ // argc arguments.
+ __ sll(a1, a0, kPointerSizeLog2);
+ __ Addu(a1, a1, sp);
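+  // a1 = sp + argc * kPointerSize, one slot above the first argument; the
+  // loads below use negative offsets of (arg_number + 1) slots.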
+
+ // Fill all the in-object properties with undefined.
+ // a0: argc
+ // a1: first argument
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ // t7: undefined
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ SharedFunctionInfo* shared = function->shared();
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed, next;
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+ // Argument passed - find it on the stack.
+ __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+ __ sw(a2, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ __ jmp(&next);
+ __ bind(&not_passed);
+ // Set the property to undefined.
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, Operand(kPointerSize));
+ __ bind(&next);
+ } else {
+ // Set the property to the constant value.
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ __ li(a2, Operand(constant));
+ __ sw(a2, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ }
+ }
+
+ // Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
+ for (int i = shared->this_property_assignments_count();
+ i < function->initial_map()->inobject_properties();
+ i++) {
+ __ sw(t7, MemOperand(t5));
+ __ Addu(t5, t5, kPointerSize);
+ }
+
+ // a0: argc
+ // t4: JSObject (not tagged)
+ // Move argc to a1 and the JSObject to return to v0 and tag it.
+ __ mov(a1, a0);
+ __ mov(v0, t4);
+ __ Or(v0, v0, Operand(kHeapObjectTag));
+
+ // v0: JSObject
+ // a1: argc
+ // Remove caller arguments and receiver from the stack and return.
+ __ sll(t0, a1, kPointerSizeLog2);
+ __ Addu(sp, sp, t0);
+ __ Addu(sp, sp, Operand(kPointerSize));
+ Counters* counters = masm()->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
+ __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
+ __ Ret();
+
+ // Jump to the generic stub in case the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_stub_call);
+ Handle<Code> generic_construct_stub =
+ masm()->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ return true;
+
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ return false;
+
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ return false;
+ }
+ return false;
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow, failed_allocation;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ // This stub is meant to be tail-jumped to, the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ // a3: elements array
+
+ // Check that the index is in range.
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ __ sra(t2, key, kSmiTagSize);
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Uless, t1, Operand(t2));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+ // a3: base pointer of external storage
+
+  // We do not untag the smi key; instead we work with it as if it were
+  // premultiplied by 2.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
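+  // The cases below scale the smi key to a byte offset: >> 1 for 8-bit
+  // elements, unchanged for 16-bit, x2 for 32-bit and x4 for 64-bit elements.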
+
+ Register value = a2;
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lb(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ srl(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lbu(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ __ addu(t3, a3, key);
+ __ lh(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ addu(t3, a3, key);
+ __ lhu(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t2, key, 1);
+ __ addu(t3, a3, t2);
+ __ lw(value, MemOperand(t3, 0));
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
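+      // t2 still holds the untagged key from the range check above.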
+ __ sll(t3, t2, 2);
+ __ addu(t3, a3, t3);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ lwc1(f0, MemOperand(t3, 0));
+ } else {
+ __ lw(value, MemOperand(t3, 0));
+ }
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ sll(t2, key, 2);
+ __ addu(t3, a3, t2);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ ldc1(f0, MemOperand(t3, 0));
+ } else {
+ // t3: pointer to the beginning of the double we want to load.
+ __ lw(a2, MemOperand(t3, 0));
+ __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // a2: value
+ // For float array type:
+ // f0: value (if FPU is supported)
+ // a2: value (if FPU is not supported)
+ // For double array type:
+ // f0: value (if FPU is supported)
+ // a2/a3: value (if FPU is not supported)
+
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
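+    // A 32-bit smi holds a 31-bit payload, i.e. values in [-2^30, 2^30 - 1].
+    // Adding 2^30 (the Subu of 0xC0000000 below) stays non-negative exactly
+    // for that range, so a negative result means a HeapNumber is needed.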
+ __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
+ __ Branch(&box_int, lt, t3, Operand(zero_reg));
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion.
+ // The arm version uses a temporary here to save r0, but we don't need to
+ // (a0 is not modified).
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(value, f0);
+ __ cvt_d_w(f0, f0);
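+      // The heap-object tag is folded into the offset so that sdc1 stores to
+      // the raw, untagged address of the 8-byte IEEE-754 payload.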
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ Register dst1 = t2;
+ Register dst2 = t3;
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm,
+ value,
+ dest,
+ f0,
+ dst1,
+ dst2,
+ t1,
+ f2);
+ __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Label pl_box_int;
+ __ And(t2, value, Operand(0xC0000000));
+ __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+      // It can fit in a Smi.
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ __ bind(&pl_box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+ // registers - also when jumping due to exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+ // This is replaced by a macro:
+ // __ mtc1(value, f0); // LS 32-bits.
+ // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
+ // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
+
+ __ Cvt_d_uw(f0, value);
+
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+ __ Ret();
+ } else {
+ // Check whether unsigned integer fits into smi.
+ Label box_int_0, box_int_1, done;
+ __ And(t2, value, Operand(0x80000000));
+ __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+ __ And(t2, value, Operand(0x40000000));
+ __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+
+ Register hiword = value; // a2.
+ Register loword = a3;
+
+ __ bind(&box_int_0);
+ // Integer does not have leading zeros.
+ GenerateUInt2Double(masm, hiword, loword, t0, 0);
+ __ Branch(&done);
+
+ __ bind(&box_int_1);
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+ __ bind(&done);
+ // Integer was converted to double in registers hiword:loword.
+ // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+ // clobbers all registers - also when jumping due to exhausted young
+ // space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+ __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+ __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+ __ mov(v0, t2);
+ __ Ret();
+ }
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The single-precision (binary32) value is already in FPU register f0.
+ __ cvt_d_s(f0, f0);
+ __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // FPU is not available, do manual single to double conversion.
+
+ // a2: floating point value (binary32).
+ // v0: heap number for result
+
+ // Extract mantissa to t4.
+ __ And(t4, value, Operand(kBinary32MantissaMask));
+
+ // Extract exponent to t5.
+ __ srl(t5, value, kBinary32MantissaBits);
+ __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+ __ li(t0, 0x7ff);
+ __ Xor(t1, t5, Operand(0xFF));
+ __ movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
+ __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
+
+ // Rebias exponent.
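+      // binary32 uses an exponent bias of 127, binary64 (HeapNumber) uses
+      // 1023, so rebiasing adds 1023 - 127 = 896 to the extracted exponent.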
+ __ Addu(t5,
+ t5,
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ And(a2, value, Operand(kBinary32SignMask));
+ value = no_reg;
+ __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+ __ or_(a2, a2, t0);
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
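+      // binary32 has 23 mantissa bits but the HeapNumber hi word holds only
+      // the top 20, so the upper 20 bits are shifted right by 3 into the hi
+      // word and the remaining 3 bits fill the top of the lo word (shift 29).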
+ __ srl(t0, t4, kMantissaShiftForHiWord);
+ __ or_(a2, a2, t0);
+ __ sll(a0, t4, kMantissaShiftForLoWord);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ Ret();
+ }
+
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+ // The double value is already in f0
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+ __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ Ret();
+ }
+
+ } else {
+ // Tag integer as smi and return it.
+ __ sll(v0, value, kSmiTagSize);
+ __ Ret();
+ }
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ __ Push(a1, a0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+ MacroAssembler* masm,
+ JSObject::ElementsKind elements_kind) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -----------------------------------
+
+ Label slow, check_heap_number, miss_force_generic;
+
+ // Register usage.
+ Register value = a0;
+ Register key = a1;
+ Register receiver = a2;
+ // a3 mostly holds the elements array or the destination external array.
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key, &miss_force_generic);
+
+ // Check that the index is in range.
+ __ SmiUntag(t0, key);
+ __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ Branch(&miss_force_generic, Ugreater_equal, t0, Operand(t1));
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // a3: external array.
+ // t0: key (integer).
+
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // Double to pixel conversion is only implemented in the runtime for now.
+ __ JumpIfNotSmi(value, &slow);
+ } else {
+ __ JumpIfNotSmi(value, &check_heap_number);
+ }
+ __ SmiUntag(t1, value);
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+ // t1: value (integer).
+
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ // Clamp the value to [0..255].
+ // v0 is used as a scratch register here.
+ Label done;
+ __ li(v0, Operand(255));
+ // Normal branch: nop in delay slot.
+ __ Branch(&done, gt, t1, Operand(v0));
+ // Use delay slot in this branch.
+ __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+ __ mov(v0, zero_reg); // In delay slot.
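+      // The delay-slot mov executes even when the branch falls through; in
+      // that case the following mov overwrites v0 with the in-range value.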
+ __ mov(v0, t1); // Value is in range 0..255.
+ __ bind(&done);
+ __ mov(t1, v0);
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ }
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t1, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+ break;
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ __ sll(t8, t0, 3);
+ __ addu(a3, a3, t8);
+ // a3: effective address of the double element
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, t1, destination,
+ f0, t2, t3, // These are: double_dst, dst1, dst2.
+ t0, f2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(a3, 0));
+ } else {
+ __ sw(t2, MemOperand(a3, 0));
+ __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+ }
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // Entry registers are intact, a0 holds the value which is the return value.
+ __ mov(v0, value);
+ __ Ret();
+
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ // a3: external array.
+ // t0: index (integer).
+ __ bind(&check_heap_number);
+ __ GetObjectType(value, t1, t2);
+ __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+ __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+ // a3: base pointer of external storage.
+ // t0: key (integer).
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+
+ __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(f0, f0);
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ swc1(f0, MemOperand(t8, 0));
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ __ sdc1(f0, MemOperand(t8, 0));
+ } else {
+ __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
+
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // Entry registers are intact, a0 holds the value
+ // which is the return value.
+ __ mov(v0, value);
+ __ Ret();
+ } else {
+ // FPU is not available, do manual conversions.
+
+ __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+ __ xor_(t1, t6, t5);
+ __ li(t2, kBinary32ExponentMask);
+ __ movz(t6, t2, t1); // Only if t6 is equal to t5.
+ __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
+
+ // Rebias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Addu(t6,
+ t6,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ li(t1, Operand(kBinary32MaxExponent));
+ __ Slt(t1, t1, t6);
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ Or(t2, t2, Operand(kBinary32ExponentMask));
+ __ movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
+ __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+ __ Slt(t1, t6, Operand(kBinary32MinExponent));
+ __ And(t2, t3, Operand(HeapNumber::kSignMask));
+ __ movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
+ __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
+
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t7, t7, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t7, t7, t4);
+ __ sll(t6, t6, kBinary32ExponentShift);
+ __ or_(t3, t7, t6);
+
+ __ bind(&done);
+ __ sll(t9, a1, 2);
+ __ addu(t9, a2, t9);
+ __ sw(t3, MemOperand(t9, 0));
+
+ // Entry registers are intact, a0 holds the value which is the return
+ // value.
+ __ mov(v0, value);
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
+ __ And(t7, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ or_(t6, t6, t7);
+ __ sll(t3, t3, kMantissaInHiWordShift);
+ __ or_(t6, t6, t3);
+ __ srl(t4, t4, kMantissaInLoWordShift);
+ __ or_(t3, t6, t4);
+ __ Branch(&done);
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, t0, 3);
+ __ addu(t8, a3, t8);
+ // t8: effective address of destination element.
+ __ sw(t4, MemOperand(t8, 0));
+ __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+ __ Ret();
+ } else {
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
+ int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
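+        // Signed element types keep a sign bit plus 31 magnitude bits, while
+        // unsigned types use all 32 bits; values whose exponent is too large
+        // are stored as min_value (0x80000000 for signed, 0 for unsigned).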
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ li(t5, HeapNumber::kExponentMask);
+ __ and_(t6, t3, t5);
+ __ movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
+ __ Branch(&done, eq, t6, Operand(zero_reg));
+
+ __ xor_(t2, t6, t5);
+ __ movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
+ __ Branch(&done, eq, t6, Operand(t5));
+
+ // Unbias exponent.
+ __ srl(t6, t6, HeapNumber::kExponentShift);
+ __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+ // If exponent is negative then result is 0.
+ __ slt(t2, t6, zero_reg);
+ __ movn(t3, zero_reg, t2); // Only if exponent is negative.
+ __ Branch(&done, lt, t6, Operand(zero_reg));
+
+ // If exponent is too big then result is minimal value.
+ __ slti(t1, t6, meaningfull_bits - 1);
+ __ li(t2, min_value);
+ __ movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
+ __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
+
+ __ And(t5, t3, Operand(HeapNumber::kSignMask));
+ __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+ __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+ __ subu(t6, t9, t6);
+ __ slt(t1, t6, zero_reg);
+ __ srlv(t2, t3, t6);
+ __ movz(t3, t2, t1); // Only if t6 is positive.
+ __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+ __ subu(t6, zero_reg, t6);
+ __ sllv(t3, t3, t6);
+ __ li(t9, meaningfull_bits);
+ __ subu(t6, t9, t6);
+ __ srlv(t4, t4, t6);
+ __ or_(t3, t3, t4);
+
+ __ bind(&sign);
+ __ subu(t2, t3, zero_reg);
+ __ movz(t3, t2, t5); // Only if t5 is zero.
+
+ __ bind(&done);
+
+ // Result is in t3.
+ // This switch block should be exactly the same as above (FPU mode).
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ addu(t8, a3, t0);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ sll(t8, t0, 1);
+ __ addu(t8, a3, t8);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, t0, 2);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ }
+
+  // Slow case, value, key and receiver still in a0, a1 and a2.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- ra : return address
+  // -- a0     : value
+  // -- a1     : key
+  // -- a2     : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+  // -- a0     : value
+  // -- a1     : key
+  // -- a2     : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a0, &miss_force_generic);
+
+ // Get the elements array.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ AssertFastElements(a2);
+
+ // Check that the key is within bounds.
+ __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+ // Load the result and make sure it's not the hole.
+ __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
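+  // The key is a smi (index << 1), so one more left shift by
+  // (kPointerSizeLog2 - kSmiTagSize) = 1 turns it into the byte offset
+  // index * kPointerSize into the elements array.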
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, t0, a3);
+ __ lw(t0, MemOperand(t0));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+ __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+ __ mov(v0, t0);
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Code* stub = masm->isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_MissForceGeneric);
+ __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+  // -- t0    : scratch (elements)
+ // -----------------------------------
+ Label miss_force_generic;
+
+ Register value_reg = a0;
+ Register key_reg = a1;
+ Register receiver_reg = a2;
+ Register scratch = a3;
+ Register elements_reg = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(a0, &miss_force_generic);
+
+ // Get the elements array and make sure it is a fast element array, not 'cow'.
+ __ lw(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+ __ CheckMap(elements_reg,
+ scratch,
+ Heap::kFixedArrayMapRootIndex,
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+ // Compare smis.
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+ __ Addu(scratch,
+ elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch3, scratch2, scratch);
+ __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
+ // value_reg (a0) is preserved.
+ // Done.
+ __ Ret();
+
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index b61ce75bd..000000000
--- a/deps/v8/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- UNREACHABLE();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
- UNREACHABLE();
-}
-
-
-void VirtualFrame::SyncRange(int begin, int end) {
- // All elements are in memory on MIPS (ie, synced).
-#ifdef DEBUG
- for (int i = begin; i <= end; i++) {
- ASSERT(elements_[i].is_synced());
- }
-#endif
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
- // TODO(MIPS): Implement DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
- // Adjust FP to point to saved FP.
- __ addiu(fp, sp, 2 * kPointerSize);
-}
-
-
-void VirtualFrame::Exit() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ addiu(sp, sp, -count * kPointerSize);
- for (int i = 0; i < count; i++) {
- __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- return kIllegalIndex;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags,
- Result* arg_count_register,
- int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- break;
- case Code::FUNCTION:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::BUILTIN:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addiu(sp, sp, num_dropped * kPointerSize);
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-void VirtualFrame::DropFromVFrameOnly(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Result VirtualFrame::Pop() {
- UNIMPLEMENTED_MIPS();
- Result res = Result();
- return res; // UNIMPLEMENTED RETURN
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ Pop(reg);
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- stack_pointer_--;
- elements_.RemoveLast();
- }
- }
- __ MultiPop(regs);
-}
-
-
-void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- __ Push(reg);
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = kNumRegisters; i > 0; i--) {
- if ((regs & (1 << i)) != 0) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- }
- }
- __ MultiPush(regs);
-}
-
-
-void VirtualFrame::EmitArgumentSlots(RegList reglist) {
- UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/mips/virtual-frame-mips.h b/deps/v8/src/mips/virtual-frame-mips.h
deleted file mode 100644
index b32e2aeed..000000000
--- a/deps/v8/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,548 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
- class SpilledScope BASE_EMBEDDED {
- public:
- SpilledScope() {}
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- NumberInfo info = NumberInfo::Unknown());
-
- // The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
-
- // The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
-
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
-
- bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- // On mips, all elements are in memory, so there is no extra bookkeeping
- // (registers, copies, etc.) beyond dropping the elements.
- elements_.Rewind(stack_pointer_ + 1);
- }
-
- // Forget count elements from the top of the frame and adjust the stack
- // pointer downward. This is used, for example, before merging frames at
- // break, continue, and return targets.
- void ForgetElements(int count);
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
- Register SpillAnyRegister();
-
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(VirtualFrame* expected);
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- void PrepareForReturn();
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() { return MemOperand(sp, 0); }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- return MemOperand(sp, index * kPointerSize);
- }
-
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
- }
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
- // A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
-
- // Save the value of the cp register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the cp register from the value of the context frame
- // slot.
- void RestoreContextRegister();
-
- // A parameter as an assembly operand.
- MemOperand ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- uint16_t a = 0; // Number of argument slots.
- return MemOperand(s8_fp, (1 + parameter_count() + a - index) *kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
-
- // The receiver frame slot.
- MemOperand Receiver() { return ParameterAt(-1); }
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
- }
-
- void CallStub(CodeStub* stub, Result* arg);
-
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
- // Call runtime with sp aligned to 8 bytes.
- void CallAlignedRuntime(Runtime::Function* f, int arg_count);
- void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flag,
- Result* arg_count_register,
- int arg_count);
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots = false);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // excepting possibly the stack pointer.
- void Drop(int count);
- // Similar to VirtualFrame::Drop but we don't modify the actual stack.
- // This is because we need to manually restore sp to the correct position.
- void DropFromVFrameOnly(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
- void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
-
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
-
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- // Same but for multiple registers
- void EmitMultiPop(RegList regs);
- void EmitMultiPopReversed(RegList regs);
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg);
- // Same but for multiple registers.
- void EmitMultiPush(RegList regs);
- void EmitMultiPushReversed(RegList regs);
-
- // Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
- inline void Push(Handle<Object> value);
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // This pushes 4 arguments slots on the stack and saves asked 'a' registers
- // 'a' registers are arguments register a0 to a3.
- void EmitArgumentSlots(RegList reglist);
-
- inline void SetTypeForLocalAt(int index, NumberInfo info);
- inline void SetTypeForParamAt(int index, NumberInfo info);
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- ZoneList<FrameElement> elements_;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- int frame_pointer() { return parameter_count() + 3; }
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- int context_index() { return frame_pointer() - 1; }
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- int function_index() { return frame_pointer() - 2; }
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- int local0_index() { return frame_pointer() + 2; }
-
- // The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
-
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class DeferredCode;
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-