author    Ryan Dahl <ry@tinyclouds.org>  2010-02-03 09:06:03 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2010-02-03 09:07:02 -0800
commit    c7cb4daa25966e4f9af3c6d5499d762736454da9 (patch)
tree      27c6541f5a1207eb74797ed63a43126c9bf2ba81 /deps/v8/src/arm
parent    c723acc72192334a62bea6ff4baa33aab0da50ad (diff)
download  node-c7cb4daa25966e4f9af3c6d5499d762736454da9.tar.gz
Upgrade V8 to 2.1.0
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h             14
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc                36
-rw-r--r--  deps/v8/src/arm/assembler-arm.h                 16
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                 61
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                 949
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                  120
-rw-r--r--  deps/v8/src/arm/constants-arm.h                  6
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                     2
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc                   52
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc           1742
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc           1781
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                       25
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc          63
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h           19
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  110
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h     6
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc               103
-rw-r--r--  deps/v8/src/arm/simulator-arm.h                  8
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc              551
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc           101
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h             34
21 files changed, 3354 insertions(+), 2445 deletions(-)
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index fd2fcd305..354436cb1 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
}
-Operand::Operand(Object** opp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(opp);
- rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Context** cpp) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(cpp);
- rmode_ = RelocInfo::NONE;
-}
-
-
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 07da80090..74547be6e 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
@@ -1371,6 +1371,36 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // Ddst = MEM(Rbase + offset).
+ // Instruction details available in ARM DDI 0406A, A8-628.
+ // cond(31-28) | 1101(27-24) | 1001(23-20) | Rbase(19-16) |
+ // Vdst(15-12) | 1011(11-8) | offset
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const Register base,
+ int offset,
+ const Condition cond) {
+ // MEM(Rbase + offset) = Dsrc.
+ // Instruction details available in ARM DDI 0406A, A8-786.
+ // cond(31-28) | 1101(27-24) | 1000(23-20) | Rbase(19-16) |
+ // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(offset % 4 == 0);
+ emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
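
For illustration, the encoding arithmetic used by the new vldr/vstr emitters above can be written as a standalone function. This is a hedged sketch (EncodeVldr and the Bn constants are hypothetical names mirroring the Bn multipliers in emit(), not V8 code); the 8-bit immediate field holds offset/4, which is why the offset must be a word-aligned value of at most 1020 bytes:

#include <cassert>
#include <cstdint>

// Bit-position multipliers matching the B8/B12/B16/B20 used in emit() above.
const uint32_t B8 = 1u << 8, B12 = 1u << 12, B16 = 1u << 16, B20 = 1u << 20;

uint32_t EncodeVldr(uint32_t cond, int base_code, int dst_code, int offset) {
  // cond(31-28) | 1101(27-24) | 1001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1011(11-8) | offset/4(7-0)
  assert(offset % 4 == 0 && offset >= 0 && offset <= 1020);
  return cond | 0xD9 * B20 | base_code * B16 | dst_code * B12 |
         0xB * B8 | ((offset / 4) & 255);
}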
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index cd53dd609..208d583ce 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@@ -398,8 +398,6 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
- INLINE(explicit Operand(Object** opp));
- INLINE(explicit Operand(Context** cpp));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -796,6 +794,14 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
+ void vldr(const DwVfpRegister dst,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const Register base,
+ int offset, // Offset must be a multiple of 4.
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
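
A usage sketch of the new declarations, following the call pattern that appears later in this diff: vldr/vstr take an untagged base address, so callers strip the heap-object tag before applying a field offset.

// Load the double value of a tagged HeapNumber in r0 into d7.
__ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);  // Offset is a multiple of 4.
// Store a computed result in d5 back through the same untagged address.
__ vstr(d5, r7, HeapNumber::kValueOffset);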
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 5389a3c5f..ae7dae3b0 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -38,15 +38,32 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- __ mov(ip, Operand(ExternalReference::builtin_passed_function()));
- __ str(r1, MemOperand(ip, 0));
-
- // The actual argument count has already been loaded into register
- // r0, but JumpToRuntime expects r0 to contain the number of
- // arguments including the receiver.
- __ add(r0, r0, Operand(1));
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments excluding receiver
+ // -- r1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(r1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects r0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
}
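
The count adjustment above reduces to a small rule; as a sketch (RuntimeArgc is a hypothetical helper name, not part of the patch), JumpToRuntime counts the receiver plus any extra argument the adaptor pushed:

int RuntimeArgc(int js_argc, bool needs_called_function) {
  int num_extra_args = needs_called_function ? 1 : 0;  // r1 pushed if needed.
  return js_argc + num_extra_args + 1;                 // +1 for the receiver.
}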
@@ -491,7 +508,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -727,8 +745,17 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
// r0: number of arguments
// r1: constructor function
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ }
// Pop the function from the stack.
// sp[0]: constructor function
@@ -783,6 +810,16 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 70d8ab495..ea4b165fd 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,7 +47,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Condition cc,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
@@ -121,12 +121,13 @@ CodeGenState::~CodeGenState() {
// -------------------------------------------------------------------------
// CodeGenerator implementation
-CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+CodeGenerator::CodeGenerator(MacroAssembler* masm,
+ Handle<Script> script,
bool is_eval)
: is_eval_(is_eval),
script_(script),
deferred_(8),
- masm_(new MacroAssembler(NULL, buffer_size)),
+ masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
@@ -142,7 +143,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
// r1: called JS function
// cp: callee's context
-void CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::Generate(FunctionLiteral* fun,
+ Mode mode,
+ CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
@@ -168,8 +171,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// r1: called JS function
// cp: callee's context
allocator_->Initialize();
- frame_->Enter();
- // tos: code slot
+
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -178,104 +180,118 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
#endif
- // Allocate space for locals and initialize them. This also checks
- // for stack overflow.
- frame_->AllocateStackSlots();
- // Initialize the function return target after the locals are set
- // up, because it needs the expected frame height from the frame.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
+ if (mode == PRIMARY) {
+ frame_->Enter();
+ // tos: code slot
- VirtualFrame::SpilledScope spilled_scope;
- int heap_slots = scope_->num_heap_slots();
- if (heap_slots > 0) {
- // Allocate local context.
- // Get outer context and create a new context based on it.
- __ ldr(r0, frame_->Function());
- frame_->EmitPush(r0);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(heap_slots);
- frame_->CallStub(&stub, 1);
- } else {
- frame_->CallRuntime(Runtime::kNewContext, 1);
- }
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
+ frame_->AllocateStackSlots();
+
+ VirtualFrame::SpilledScope spilled_scope;
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
- JumpTarget verified_true;
- __ cmp(r0, Operand(cp));
- verified_true.Branch(eq);
- __ stop("NewContext: r0 is expected to be the same as cp");
- verified_true.Bind();
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
#endif
- // Update context local.
- __ str(cp, frame_->Context());
- }
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
- // TODO(1241774): Improve this code:
- // 1) only needed if we have a context
- // 2) no need to recompute context ptr every single time
- // 3) don't copy parameter operand code from SlotOperand!
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope_->num_parameters(); i++) {
- Variable* par = scope_->parameter(i);
- Slot* slot = par->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- ASSERT(!scope_->is_global_scope()); // no parameters in global scope
- __ ldr(r1, frame_->ParameterAt(i));
- // Loads r2 with context; used below in RecordWrite.
- __ str(r1, SlotOperand(slot, r2));
- // Load the offset into r3.
- int slot_offset =
- FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ // No parameters in global scope.
+ ASSERT(!scope_->is_global_scope());
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
}
}
- }
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope_->arguments() != NULL) {
- Comment cmnt(masm_, "[ allocate arguments object");
- ASSERT(scope_->arguments_shadow() != NULL);
- Variable* arguments = scope_->arguments()->var();
- Variable* shadow = scope_->arguments_shadow()->var();
- ASSERT(arguments != NULL && arguments->slot() != NULL);
- ASSERT(shadow != NULL && shadow->slot() != NULL);
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address, and the
- // frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- StoreToSlot(arguments->slot(), NOT_CONST_INIT);
- StoreToSlot(shadow->slot(), NOT_CONST_INIT);
- frame_->Drop(); // Value is no longer needed.
- }
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope_->arguments() != NULL) {
+ Comment cmnt(masm_, "[ allocate arguments object");
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+ frame_->Drop(); // Value is no longer needed.
+ }
- // Initialize ThisFunction reference if present.
- if (scope_->is_function_scope() && scope_->function() != NULL) {
- __ mov(ip, Operand(Factory::the_hole_value()));
- frame_->EmitPush(ip);
- StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+ } else {
+ // When used as the secondary compiler for splitting, r1, cp,
+ // fp, and lr have been pushed on the stack. Adjust the virtual
+ // frame to match this state.
+ frame_->Adjust(4);
+ allocator_->Unuse(r1);
+ allocator_->Unuse(lr);
}
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
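
Condensed, the mode split introduced in this hunk looks as follows; this is an illustrative outline of the control flow, not the full body:

void CodeGenerator::Generate(FunctionLiteral* fun, Mode mode,
                             CompilationInfo* info) {
  if (mode == PRIMARY) {
    frame_->Enter();               // Build a fresh frame.
    frame_->AllocateStackSlots();  // Locals + stack-overflow check.
    // ... context allocation, arguments object, ThisFunction ...
  } else {
    // Secondary (splitting) compiler: r1, cp, fp, lr are already pushed.
    frame_->Adjust(4);
    allocator_->Unuse(r1);
    allocator_->Unuse(lr);
  }
  // The return target needs the frame height, so it is set up afterwards.
  function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
  function_return_is_shadowed_ = false;
}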
@@ -605,14 +621,19 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
- : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
- cgen_->UnloadReference(this);
+ ASSERT(is_unloaded() || is_illegal());
}
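
The ownership change above, where references now unload explicitly rather than in the destructor, can be shown with a self-contained analogue (ScopedRef is a hypothetical illustration, not V8 code):

#include <cassert>

class ScopedRef {
 public:
  explicit ScopedRef(bool persist_after_get)
      : persist_after_get_(persist_after_get) {}
  ~ScopedRef() { assert(unloaded_); }  // Destructor only verifies.
  void GetValue() {
    // A compound assignment reads the value first and keeps the reference
    // alive for the following store; a plain read unloads immediately.
    if (!persist_after_get_) Unload();
  }
  void SetValue() { Unload(); }  // Stores always unload.
  void Unload() { unloaded_ = true; }
 private:
  bool persist_after_get_;
  bool unloaded_ = false;
};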
@@ -661,6 +682,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
frame_->Drop(size);
frame_->EmitPush(r0);
}
+ ref->set_unloaded();
}
@@ -1091,7 +1113,8 @@ void CodeGenerator::Comparison(Condition cc,
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- int position) {
+ CallFunctionFlags flags,
+ int position) {
VirtualFrame::SpilledScope spilled_scope;
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -1104,7 +1127,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
@@ -1243,8 +1266,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Reference target(this, node->proxy());
LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
- // The reference is removed from the stack (preserving TOS) when
- // it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1931,25 +1952,17 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, r3 pushed above) is
- // right on top of or right underneath a zero-sized reference.
- each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->EmitPop(r0);
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop(2);
+ } else {
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (eg, r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ frame_->Drop();
}
}
}
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2289,7 +2302,8 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
- frame_->CallRuntime(Runtime::kDebugBreak, 0);
+ DebuggerStatementStub ces;
+ frame_->CallStub(&ces, 0);
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
@@ -2592,13 +2606,12 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register.
- Result name(r2);
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
- frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
// Drop the global object. The result is in r0.
@@ -2843,7 +2856,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
- { Reference target(this, node->target());
+ { Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2858,8 +2871,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
node->op() == Token::INIT_CONST) {
LoadAndSpill(node->value());
- } else {
- // +=, *= and similar binary assignments.
+ } else { // Assignment is a compound assignment.
// Get the old value of the lhs.
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
@@ -2880,13 +2892,12 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->EmitPush(r0);
}
}
-
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (var != NULL &&
(var->mode() == Variable::CONST) &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
-
+ UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2999,7 +3010,7 @@ void CodeGenerator::VisitCall(Call* node) {
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3056,7 +3067,7 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r1); // receiver
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
} else if (property != NULL) {
@@ -3096,20 +3107,24 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
- // Load the function to call from the property through a reference.
- Reference ref(this, property);
- ref.GetValueAndSpill(); // receiver
-
- // Pass receiver to called function.
+ LoadAndSpill(property->obj());
+ LoadAndSpill(property->key());
+ EmitKeyedLoad(false);
+ frame_->Drop(); // key
+ // Put the function below the receiver.
if (property->is_synthetic()) {
+ // Use the global receiver.
+ frame_->Drop();
+ frame_->EmitPush(r0);
LoadGlobalReceiver(r0);
} else {
- __ ldr(r0, frame_->ElementAt(ref.size()));
- frame_->EmitPush(r0);
+ frame_->EmitPop(r1); // receiver
+ frame_->EmitPush(r0); // function
+ frame_->EmitPush(r1); // receiver
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
}
@@ -3125,7 +3140,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadGlobalReceiver(r0);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
}
ASSERT(frame_->height() == original_height + 1);
@@ -3159,22 +3174,15 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
// r0: the number of arguments.
- Result num_args(r0);
__ mov(r0, Operand(arg_count));
-
// Load the function into r1 as per calling convention.
- Result function(r1);
__ ldr(r1, frame_->ElementAt(arg_count + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
CodeForSourcePosition(node->position());
Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
- frame_->CallCodeObject(ic,
- RelocInfo::CONSTRUCT_CALL,
- &num_args,
- &function,
- arg_count + 1);
+ frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
// Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
__ str(r0, frame_->Top());
@@ -3469,6 +3477,20 @@ void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ cc_reg_ = ne;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3561,7 +3583,8 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kStringCompare, 2);
+ StringCompareStub stub;
+ frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -3709,6 +3732,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); // r0 has result
} else {
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
LoadAndSpill(node->expression());
frame_->EmitPop(r0);
switch (op) {
@@ -3719,9 +3745,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
@@ -3734,10 +3757,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
- frame_->EmitPush(r0);
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ frame_->CallStub(&stub, 0);
continue_label.Jump();
+
smi_label.Bind();
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
@@ -3791,7 +3814,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
frame_->EmitPush(r0);
}
- { Reference target(this, node->expression());
+ // A constant reference is not saved to, so a constant reference is not a
+ // compound assignment reference.
+ { Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -4252,6 +4277,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
+void CodeGenerator::EmitKeyedLoad(bool is_global) {
+ Comment cmnt(masm_, "[ Load from keyed Property");
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ RelocInfo::Mode rmode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ frame_->CallCodeObject(ic, rmode, 0);
+}
+
+
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
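
Call-site sketch of the new EmitKeyedLoad helper, taken from the VisitCall changes earlier in this diff: the receiver and key are pushed, the keyed-load IC runs, and the key is dropped while the result stays in r0.

LoadAndSpill(property->obj());   // Push the receiver.
LoadAndSpill(property->key());   // Push the key.
EmitKeyedLoad(false);            // Non-global load: RelocInfo::CODE_TARGET.
frame_->Drop();                  // Drop the key; the loaded value is in r0.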
@@ -4304,13 +4339,12 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
- Result name_reg(r2);
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
break;
}
@@ -4318,23 +4352,21 @@ void Reference::GetValue() {
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
- VirtualFrame* frame = cgen_->frame();
- Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
- RelocInfo::Mode rmode = (var == NULL)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- frame->CallCodeObject(ic, rmode, 0);
- frame->EmitPush(r0);
+ cgen_->EmitKeyedLoad(var != NULL);
+ cgen_->frame()->EmitPush(r0);
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ cgen_->UnloadReference(this);
+ }
}
@@ -4353,6 +4385,7 @@ void Reference::SetValue(InitState init_state) {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
cgen_->StoreToSlot(slot, init_state);
+ cgen_->UnloadReference(this);
break;
}
@@ -4362,18 +4395,12 @@ void Reference::SetValue(InitState init_state) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<String> name(GetName());
- Result value(r0);
frame->EmitPop(r0);
-
// Setup the name register.
- Result property_name(r2);
__ mov(r2, Operand(name));
- frame->CallCodeObject(ic,
- RelocInfo::CODE_TARGET,
- &value,
- &property_name,
- 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4386,10 +4413,10 @@ void Reference::SetValue(InitState init_state) {
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// TODO(1222589): Make the IC grab the values from the stack.
- Result value(r0);
frame->EmitPop(r0); // value
- frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPush(r0);
+ cgen_->UnloadReference(this);
break;
}
@@ -4777,7 +4804,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
} else if (cc == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
- __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
+ __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ mov(pc, Operand(lr)); // Return.
@@ -4789,6 +4816,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
+
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
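
The bit test described above, all exponent bits (52..62) set with a non-zero mantissa, is the standard IEEE-754 NaN encoding. A C++ sketch of the same predicate (IsNaNBits is a hypothetical name):

#include <cstdint>
#include <cstring>

bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;  // Bits 52..62.
  const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;  // Bits 0..51.
  return (bits & kExponentMask) == kExponentMask &&
         (bits & kMantissaMask) != 0;
}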
@@ -4804,9 +4832,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ orr(r0, r3, Operand(r2), SetCC);
// For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
- // (it's a NaN). For <= and >= we need to load r0 with the failing value
- // if it's a NaN.
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
if (cc != eq) {
// All-zero means Infinity means equal.
__ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
@@ -4827,17 +4855,17 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict) {
- Label lhs_is_smi;
+ Label rhs_is_smi;
__ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &lhs_is_smi);
+ __ b(eq, &rhs_is_smi);
- // Rhs is a Smi. Check whether the non-smi is a heap number.
+ // Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If rhs is not a number and lhs is a Smi then strict equality cannot
// succeed. Return non-equal (r0 is already not zero)
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
@@ -4846,92 +4874,104 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ b(ne, slow);
}
- // Rhs is a smi, lhs is a number.
- __ push(lr);
-
+ // Lhs (r1) is a smi, rhs (r0) is a number.
if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
} else {
+ __ push(lr);
+ // Convert lhs to a double in r2, r3.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Load rhs to a double in r0, r1.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ pop(lr);
}
-
- // r3 and r2 are rhs as double.
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
// We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a Smi.
- __ pop(lr);
- __ jmp(rhs_not_nan);
+ // since it's a smi.
+ __ jmp(lhs_not_nan);
- __ bind(&lhs_is_smi);
- // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ bind(&rhs_is_smi);
+ // Rhs is a smi. Check whether the non-smi lhs is a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
if (strict) {
- // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // If lhs is not a number and rhs is a smi then strict equality cannot
// succeed. Return non-equal.
__ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
__ mov(pc, Operand(lr), LeaveCC, ne); // Return.
} else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // Smi compared non-strictly with a non-smi non-heap-number. Call
// the runtime.
__ b(ne, slow);
}
- // Lhs is a smi, rhs is a number.
- // r0 is Smi and r1 is heap number.
- __ push(lr);
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-
+ // Rhs (r0) is a smi, lhs (r1) is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert rhs to a double in d6.
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
+ __ push(lr);
+ // Load lhs to a double in r2, r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ // Convert rhs to a double in r0, r1.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
}
-
- __ pop(lr);
// Fall through to both_loaded_as_doubles.
}
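
The VFP path above untags the smi with an arithmetic shift before converting. The equivalent arithmetic in C++ (SmiToDouble is illustrative, assuming the 32-bit layout where kSmiTagSize == 1):

#include <cstdint>

double SmiToDouble(int32_t tagged) {
  int32_t untagged = tagged >> 1;         // ASR by kSmiTagSize drops the tag.
  return static_cast<double>(untagged);   // vmov + vcvt in the VFP path.
}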
-void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
Register exp_mask_reg = r5;
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+ __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, rhs_not_nan);
+ __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
__ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0));
+ __ cmp(lhs_mantissa, Operand(0));
__ b(ne, &one_is_nan);
- __ bind(rhs_not_nan);
+ __ bind(lhs_not_nan);
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
__ b(ne, &neither_is_nan);
__ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0));
+ __ cmp(rhs_mantissa, Operand(0));
__ b(eq, &neither_is_nan);
__ bind(&one_is_nan);
@@ -4951,21 +4991,21 @@ void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
if (cc == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
- __ cmp(lhs_mantissa, Operand(rhs_mantissa));
- __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+ __ cmp(rhs_mantissa, Operand(lhs_mantissa));
+ __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
// Return non-zero if the numbers are unequal.
__ mov(pc, Operand(lr), LeaveCC, ne);
- __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+ __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
// If exponents are equal then return 0.
__ mov(pc, Operand(lr), LeaveCC, eq);
@@ -4975,12 +5015,12 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
// We start by seeing if the mantissas (that are equal) or the bottom
// 31 bits of the rhs exponent are non-zero. If so we return not
// equal.
- __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
__ mov(r0, Operand(r4), LeaveCC, ne);
__ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
// Now they are equal if and only if the lhs exponent is zero in its
// low 31 bits.
- __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+ __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
__ mov(pc, Operand(lr));
} else {
// Call a native function to do a comparison between two non-NaNs.
@@ -5035,17 +5075,26 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
__ b(ne, not_heap_numbers);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r2, r3);
__ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
- __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ }
__ jmp(both_loaded_as_doubles);
}
@@ -5070,11 +5119,12 @@ static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
}
-// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
-// positive or negative to indicate the result of the comparison.
+// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -5094,32 +5144,44 @@ void CompareStub::Generate(MacroAssembler* masm) {
// 1) Return the answer.
// 2) Go to slow.
// 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to rhs_not_nan.
+ // 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
- EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+ // comparison. If VFP3 is supported the double values of the numbers have
+ // been loaded into d7 and d6. Otherwise, the double values have been loaded
+ // into r0, r1, r2, and r3.
+ EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
- // r0, r1, r2, r3 are the double representations of the left hand side
- // and the right hand side.
-
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, &rhs_not_nan, cc_);
-
+ // The arguments have been converted to doubles and stored in d6 and d7, if
+ // VFP3 is supported, or in r0, r1, r2, and r3.
if (CpuFeatures::IsSupported(VFP3)) {
+ __ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
+ Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
-
- __ vcmp(d6, d7);
- __ vmrs(pc);
- __ mov(r0, Operand(0), LeaveCC, eq);
- __ mov(r0, Operand(1), LeaveCC, lt);
- __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ vcmp(d7, d6);
+ __ vmrs(pc); // Move vector status bits to normal status bits.
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc_ == lt || cc_ == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
__ mov(pc, Operand(lr));
} else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
EmitTwoNonNanDoubleComparison(masm, cc_);
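
The NaN branch above relies on the stub's result convention (LESS, EQUAL, GREATER are -1, 0, 1): every comparison with NaN must evaluate to false, so the stub loads whichever value fails the requested condition. As a sketch (NanComparisonResult is a hypothetical name):

int NanComparisonResult(bool cc_is_lt_or_le) {
  // For < and <=, returning GREATER makes the test false; otherwise LESS.
  return cc_is_lt_or_le ? 1 /* GREATER */ : -1 /* LESS */;
}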
@@ -5135,14 +5197,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
Label check_for_symbols;
+ Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of r0.
+ // In this case r2 will contain the type of r0. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
&both_loaded_as_doubles,
&check_for_symbols,
- &slow);
+ &flat_string_check);
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5150,10 +5213,27 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
- EmitCheckForSymbols(masm, &slow);
+ EmitCheckForSymbols(masm, &flat_string_check);
}
+ // Check for both being sequential ASCII strings, and inline if that is the
+ // case.
+ __ bind(&flat_string_check);
+
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ r1,
+ r0,
+ r2,
+ r3,
+ r4,
+ r5);
+ // Never falls through to here.
+
__ bind(&slow);
+
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
@@ -5220,10 +5300,18 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- if (CpuFeatures::IsSupported(VFP3)) {
+ // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+ // using registers d7 and d6 for the double values.
+ bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+ Token::MOD != operation;
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
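
The dispatch rule introduced above amounts to: inline with VFP3 registers for the four arithmetic operators, but never for MOD, which still goes through the C library. As a self-contained sketch:

enum class Op { ADD, SUB, MUL, DIV, MOD };

bool UseFpRegisters(bool vfp3_supported, Op op) {
  return vfp3_supported && op != Op::MOD;  // MOD has no VFP3 fast path.
}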
@@ -5305,9 +5393,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
}
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r0 to d7.
+ __ sub(r7, r0, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r0);
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
@@ -5315,10 +5410,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
-
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ // Convert smi in r0 to double in d7.
+ __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, r7);
+ __ vcvt(d7, s15);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5338,9 +5435,16 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
if (mode == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
}
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ if (use_fp_registers) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber r1 to d6.
+ __ sub(r7, r1, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ } else {
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+ }
__ jmp(&finished_loading_r1);
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
@@ -5348,9 +5452,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- if (CpuFeatures::IsSupported(VFP3)) {
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
- __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ // Convert smi in r1 to double in d6.
+ __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, r7);
+ __ vcvt(d6, s13);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5363,22 +5470,12 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- // r5: Address of heap number for result.
-
- if (CpuFeatures::IsSupported(VFP3) &&
- ((Token::MUL == operation) ||
- (Token::DIV == operation) ||
- (Token::ADD == operation) ||
- (Token::SUB == operation))) {
+ // If we are inlining the operation using VFP3 instructions for
+ // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+ if (use_fp_registers) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ vmov(d6, r0, r1);
- __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
__ vmul(d5, d6, d7);
@@ -5391,15 +5488,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
} else {
UNREACHABLE();
}
-
- __ vmov(r0, r1, d5);
-
- __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
- __ mov(r0, Operand(r5));
+ __ sub(r0, r5, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
__ mov(pc, lr);
return;
}
+
+ // If we did not inline the operation, then the arguments are in:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+ // r5: Address of heap number for result.
+
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -6002,59 +6104,96 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- ASSERT(op_ == Token::SUB);
-
- Label undo;
- Label slow;
- Label not_smi;
+ Label slow, done;
- // Enter runtime system if the value is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &not_smi);
-
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ cmp(r0, Operand(0));
- __ b(eq, &slow);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow);
+
+ __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ b(&done);
+
+ __ bind(&try_float);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (overwrite_) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+ // Convert the heap number in r0 to an untagged integer in r1.
+ GetInt32(masm, r0, r1, r2, r3, &slow);
+
+ // Do the bitwise operation (move negated) and check if the result
+ // fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ b(&done);
+
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite r0 until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in r0.
+ AllocateHeapNumber(masm, &slow, r2, r3, r4);
+ __ mov(r0, Operand(r2));
+ }
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r1, r0, Operand(0), SetCC);
- __ b(vs, &slow);
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ } else {
+ UNIMPLEMENTED();
+ }
- __ mov(r0, Operand(r1)); // Set r0 to result.
+ __ bind(&done);
__ StubReturn(1);
- // Enter runtime system.
+ // Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ push(r0);
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-
- __ bind(&not_smi);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- AllocateHeapNumber(masm, &slow, r1, r2, r3);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
}
- __ StubReturn(1);
-}
-
-
-int CEntryStub::MinorKey() {
- ASSERT(result_size_ <= 2);
- // Result returned in r0 or r0+r1 by default.
- return 0;
}
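
The smi fast paths above both lean on 32-bit two's-complement arithmetic. A minimal C++ sketch of the two checks, assuming 31-bit-payload smis with tag bit zero (the helper names are illustrative, not V8 API):

    #include <cstdint>

    // Token::SUB fast path: 0 - x (rsb ..., SetCC) overflows only for the
    // most negative 32-bit value; the vs branch sends that to the slow case.
    bool NegateTaggedSmi(int32_t tagged, int32_t* result) {
      if (tagged == INT32_MIN) return false;  // overflow: take the slow path
      *result = -tagged;  // negation preserves the zero tag bit
      return true;
    }

    // Token::BIT_NOT fast path: an int32 fits in a smi iff it lies in
    // [-2^30, 2^30 - 1]. Adding 0x40000000 maps that range onto
    // [0, 2^31 - 1], so a negative sum (the mi branch) means "does not fit".
    bool FitsSmi(int32_t value) {
      uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
      return static_cast<int32_t>(biased) >= 0;
    }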
@@ -6165,7 +6304,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -6225,7 +6363,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(mode);
+ __ LeaveExitFrame(mode_);
// check if we should retry or throw exception
Label retry;
@@ -6258,7 +6396,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
-void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// r0: number of arguments including receiver
// r1: pointer to builtin function
@@ -6266,17 +6404,15 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
+ // Result returned in r0 or r0+r1 by default.
+
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
// builtin once.
- ExitFrame::Mode mode = is_debug_break
- ? ExitFrame::MODE_DEBUG
- : ExitFrame::MODE_NORMAL;
-
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode);
+ __ EnterExitFrame(mode_);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -6291,7 +6427,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
false,
false);
@@ -6300,7 +6435,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
false);
@@ -6311,7 +6445,6 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- mode,
true,
true);
@@ -6601,6 +6734,33 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+
+ // If the receiver might be a value (string, number or boolean), check
+ // for this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ BranchOnSmi(r1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ LeaveInternalFrame();
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// function, receiver [, arguments]
__ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
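
The new boxing prologue corresponds roughly to this sketch, where IsSmi, TypeOf and ToObject stand in for the smi check, CompareObjectType and the Builtins::TO_OBJECT call (illustrative names, not the real helpers):

    // The receiver slot is sp[argc]. Smis and anything below
    // FIRST_JS_OBJECT_TYPE (strings, numbers, booleans) get wrapped.
    Object* receiver = stack[argc];
    if (IsSmi(receiver) || TypeOf(receiver) < FIRST_JS_OBJECT_TYPE) {
      stack[argc] = ToObject(receiver);  // box the value in a JS object
    }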
@@ -6677,6 +6837,101 @@ int CompareStub::MinorKey() {
}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label compare_lengths;
+ // Find minimum length and length difference.
+ __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+ Register length_delta = scratch3;
+ __ mov(scratch1, scratch2, LeaveCC, gt);
+ Register min_length = scratch1;
+ __ tst(min_length, Operand(min_length));
+ __ b(eq, &compare_lengths);
+
+ // Set up registers so that we only need to increment one register
+ // in the loop.
+ __ add(scratch2, min_length,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(left, left, Operand(scratch2));
+ __ add(right, right, Operand(scratch2));
+ // Registers left and right point to the min_length character of the strings.
+ __ rsb(min_length, min_length, Operand(-1));
+ Register index = min_length;
+ // After the pre-increment in the loop, index runs from -min_length to 0.
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ add(index, index, Operand(1), SetCC);
+ __ ldrb(scratch2, MemOperand(left, index), ne);
+ __ ldrb(scratch4, MemOperand(right, index), ne);
+ // Skip to compare lengths with eq condition true.
+ __ b(eq, &compare_lengths);
+ __ cmp(scratch2, scratch4);
+ __ b(eq, &loop);
+ // Fall through with eq condition false.
+ }
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+ // If all compared characters matched, use length_delta as the result;
+ // a zero length_delta is already Smi::FromInt(EQUAL).
+ __ mov(r0, Operand(length_delta), SetCC, eq);
+ // Fall through to here if characters compare not-equal.
+ __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+ __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+ __ Ret();
+}
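
The loop above uses the negative-index idiom: both string cursors are advanced past the compared region once, and a single counter runs from -min_length up to zero, so each iteration updates only one register. A hedged C++ equivalent (untagged ints instead of smis, simplified control flow):

    // Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER), like the stub.
    int CompareFlatAscii(const char* left, int left_len,
                         const char* right, int right_len) {
      int length_delta = left_len - right_len;
      int min_length = (length_delta > 0) ? right_len : left_len;
      const char* lp = left + min_length;   // one past the compared region
      const char* rp = right + min_length;
      for (int index = -min_length; index != 0; index++) {
        if (lp[index] != rp[index]) {
          return (lp[index] > rp[index]) ? 1 : -1;
        }
      }
      // All min_length characters matched: the length difference decides.
      if (length_delta == 0) return 0;
      return (length_delta > 0) ? 1 : -1;
    }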
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // sp[0]: return address
+ // sp[4]: right string
+ // sp[8]: left string
+
+ __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); // left
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(r0, r1);
+ __ b(ne, &not_same);
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+ // Compare flat ascii strings natively; remove the arguments from the stack first.
+ __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index b62bc36d7..0384485f1 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -32,6 +32,7 @@ namespace v8 {
namespace internal {
// Forward declarations
+class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
@@ -43,57 +44,69 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame. The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
- enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen, Expression* expression);
+ enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
+ ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
+ void set_unloaded() {
+ ASSERT_NE(ILLEGAL, type_);
+ ASSERT_NE(UNLOADED, type_);
+ type_ = UNLOADED;
+ }
// The size the reference takes up on the stack.
- int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+ int size() const {
+ return (type_ < SLOT) ? 0 : type_;
+ }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+ bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
- // the expression stack, and it is left in place with its value above it.
+ // the expression stack, and it is consumed by the call unless the
+ // reference is for a compound assignment.
+ // If the reference is not consumed, it is left in place under its value.
void GetValue();
- // Generate code to push the value of a reference on top of the expression
- // stack and then spill the stack frame. This function is used temporarily
- // while the code generator is being transformed.
+ // Generate code to pop a reference, push the value of the reference,
+ // and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
- // on the expression stack. The stored value is left in place (with the
- // reference intact below it) to support chained assignments.
+ // on the expression stack. The value is stored in the location specified
+ // by the reference, and is left on top of the stack, after the reference
+ // is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
+ // Keep the reference on the stack after GetValue, so SetValue can use it later.
+ bool persist_after_get_;
};
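
For orientation, a schematic lifetime of a Reference in the code generator now looks like this (simplified; the persist flag is what keeps the reference loaded between the get and the set of a compound assignment):

    {
      Reference ref(cgen, expr, /* persist_after_get */ true);
      ref.GetValue();                // pushes the value, keeps the reference
      // ... emit the binary op for the compound assignment ...
      ref.SetValue(NOT_CONST_INIT);  // stores, pops the reference: UNLOADED
    }  // ~Reference() checks that the reference was consumed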
@@ -137,11 +150,21 @@ class CodeGenState BASE_EMBEDDED {
class CodeGenerator: public AstVisitor {
public:
+ // Compilation mode. Either the compiler is used as the primary
+ // compiler and needs to set up everything, or the compiler is used
+ // as the secondary compiler for split compilation and has to handle
+ // bailouts.
+ enum Mode {
+ PRIMARY,
+ SECONDARY
+ };
+
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
- bool is_eval);
+ bool is_eval,
+ CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(FunctionLiteral* fun);
@@ -189,8 +212,7 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction
- CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
- virtual ~CodeGenerator() { delete masm_; }
+ CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
// Accessors
Scope* scope() const { return scope_; }
@@ -227,7 +249,7 @@ class CodeGenerator: public AstVisitor {
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
- void GenCode(FunctionLiteral* fun);
+ void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
// The following are used by class Reference.
void LoadReference(Reference* ref);
@@ -274,6 +296,9 @@ class CodeGenerator: public AstVisitor {
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Load a keyed property, leaving it in r0. The receiver and key are
+ // passed on the stack, and remain there.
+ void EmitKeyedLoad(bool is_global);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
@@ -304,7 +329,9 @@ class CodeGenerator: public AstVisitor {
bool reversed,
OverwriteMode mode);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
// Control flow
void Branch(bool if_true, JumpTarget* target);
@@ -339,6 +366,7 @@ class CodeGenerator: public AstVisitor {
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
+ void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -426,33 +454,13 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
- friend class CodeGenSelector;
+ friend class FullCodeGenerator;
+ friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
@@ -530,6 +538,28 @@ class GenericBinaryOpStub : public CodeStub {
};
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns the result in r0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 943220739..8a32c95b6 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,6 +237,7 @@ class Instr {
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ inline int CoprocessorField() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
@@ -246,6 +247,8 @@ class Instr {
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
+ inline int PField() const { return Bit(24); }
+ inline int UField() const { return Bit(23); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -296,6 +299,7 @@ class Instr {
inline bool HasB() const { return BField() == 1; }
inline bool HasW() const { return WField() == 1; }
inline bool HasL() const { return LField() == 1; }
+ inline bool HasU() const { return UField() == 1; }
inline bool HasSign() const { return SignField() == 1; }
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index fc9808d52..6eb5239b8 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -98,7 +98,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ mov(r0, Operand(0)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
- CEntryDebugBreakStub ceb;
+ CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index afed0fa5c..5b314557d 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -998,29 +998,43 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- if (instr->Bit(23) == 1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- } else if (instr->Bit(20) == 1) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Bit(21) == 1) {
+ if (instr->CoprocessorField() != 0xB) {
Unknown(instr); // Not used by V8.
} else {
- Unknown(instr); // Not used by V8.
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Move a double to or from two GP registers (vmov).
+ if (instr->Bits(7, 4) != 0x1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->HasL()) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ } else {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ }
+ break;
+ case 0x8:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+ }
+ break;
+ case 0xC:
+ if (instr->HasL()) {
+ Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+ } else {
+ Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+ }
+ break;
+ default:
+ Unknown(instr); // Not used by V8.
+ break;
+ }
}
}
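
The opcode cases map directly onto the P and U bits: 0xC is P=1, U=1 (positive word offset) and 0x8 is P=1, U=0 (negative word offset), matching the encoding emitted by Assembler::vldr/vstr. A sketch of the offset decode (an illustrative helper, not part of the decoder):

    // imm8 (bits 7-0) counts words; the U bit (23) selects the sign.
    int VfpLoadStoreByteOffset(uint32_t instr) {
      int imm8 = instr & 0xFF;
      bool up = (instr >> 23) & 1;      // U bit
      return (up ? imm8 : -imm8) * 4;   // byte offset from Rbase
    }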
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 0d934b5ab..1aeea7ab6 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,1711 +28,113 @@
#include "v8.h"
#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
#include "fast-codegen.h"
-#include "parser.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (ie, ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
- function_ = fun;
- SetFunctionPosition(fun);
- int locals_count = fun->scope()->num_stack_slots();
-
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- if (fun->scope()->num_heap_slots() > 0) {
- Comment cmnt(masm_, "[ Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- __ CallRuntime(Runtime::kNewContext, 1);
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = fun->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Slot* slot = fun->scope()->parameter(i)->slot();
- if (slot != NULL && slot->type() == Slot::CONTEXT) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context
- __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
- }
- }
- }
-
- Variable* arguments = fun->scope()->arguments()->AsVariable();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
- fun->num_parameters() * kPointerSize));
- __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ CallStub(&stub);
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments->slot(), r0, r1, r2);
- Slot* dot_arguments_slot =
- fun->scope()->arguments_shadow()->AsVariable()->slot();
- Move(dot_arguments_slot, r3, r1, r2);
- }
-
- // Check the stack for overflow or break request.
- // Put the lr setup instruction in the delay slot. The kInstrSize is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- { Comment cmnt(masm_, "[ Stack check");
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
-
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(fun->body());
- ASSERT(loop_depth() == 0);
- }
-
- { Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence(function_->end_position());
+void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+ // Offset 2 is due to return address and saved frame pointer.
+ int index = 2 + function()->scope()->num_parameters();
+ __ ldr(reg, MemOperand(sp, index * kPointerSize));
}
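
The index arithmetic assumes the usual layout at this point, schematically (n = num_parameters; the order of the two saved words is per the comment above):

    // sp + 0 * kPointerSize       : saved frame pointer / return address
    // sp + 1 * kPointerSize       : return address / saved frame pointer
    // sp + 2 * kPointerSize       : parameter n-1 (pushed last)
    //   ...
    // sp + (1 + n) * kPointerSize : parameter 0 (pushed first)
    // sp + (2 + n) * kPointerSize : receiver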
-void FastCodeGenerator::EmitReturnSequence(int position) {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-
- // Calculate the exact length of the return sequence and make sure that
- // the constant pool is not emitted inside of the return sequence.
- int num_parameters = function_->scope()->num_parameters();
- int32_t sp_delta = (num_parameters + 1) * kPointerSize;
- int return_sequence_length = Assembler::kJSReturnSequenceLength;
- if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
- // Additional mov instruction generated.
- return_sequence_length++;
- }
- masm_->BlockConstPoolFor(return_sequence_length);
-
- CodeGenerator::RecordPositions(masm_, position);
- __ RecordJSReturn();
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
-
- // Check that the size of the code used for returning matches what is
- // expected by the debugger. The add instruction above is an addressing
- // mode 1 instruction where there are restrictions on which immediate values
- // can be encoded in the instruction and which immediate values requires
- // use of an additional instruction for moving the immediate to a temporary
- // register.
- ASSERT_EQ(return_sequence_length,
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
+void FastCodeGenerator::EmitReceiverMapCheck() {
+ Comment cmnt(masm(), ";; MapCheck(this)");
+ if (FLAG_print_ir) {
+ PrintF("MapCheck(this)\n");
}
-}
+ EmitLoadReceiver(r1);
+ __ BranchOnSmi(r1, bailout());
-void FastCodeGenerator::Apply(Expression::Context context,
- Slot* slot,
- Register scratch) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- Move(scratch, slot);
- Apply(context, scratch);
- break;
- }
+ ASSERT(has_receiver() && receiver()->IsHeapObject());
+ Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
+ Handle<Map> map(object->map());
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(ip, Operand(map));
+ __ cmp(r3, ip);
+ __ b(ne, bailout());
}
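
In effect the emitted guard is the following (a sketch of the intent; IsSmi and the map load mirror the BranchOnSmi and HeapObject::kMapOffset check above):

    // Specialized code is only valid for the receiver shape seen at
    // compile time: smis and objects with a different map bail out.
    if (IsSmi(receiver) || receiver->map() != expected_map) bailout();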
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kValueTest:
- case Expression::kTestValue:
- __ mov(ip, Operand(lit->handle()));
- Apply(context, ip);
- break;
- }
-}
-
-
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(1);
- break;
- case Expression::kValue:
- break;
- case Expression::kTest:
- __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp, 0));
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp, 0));
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- }
- }
-}
-
-
-void FastCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
- ASSERT(count > 0);
- ASSERT(!reg.is(sp));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ Drop(count);
- break;
- case Expression::kValue:
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- break;
- case Expression::kTest:
- __ Drop(count);
- TestAndBranch(reg, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- TestAndBranch(reg, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp));
- TestAndBranch(reg, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return MemOperand(fp, SlotOffset(slot));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, slot->index());
- }
- case Slot::LOOKUP:
- UNREACHABLE();
- }
- UNREACHABLE();
- return MemOperand(r0, 0);
-}
-
-
-void FastCodeGenerator::Move(Register destination, Slot* source) {
- // Use destination as scratch.
- MemOperand location = EmitSlotSearch(source, destination);
- __ ldr(destination, location);
-}
-
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
- ASSERT(!scratch1.is(src) && !scratch2.is(src));
- MemOperand location = EmitSlotSearch(dst, scratch1);
- __ str(src, location);
- // Emit the write barrier code if the location is in the heap.
- if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
- }
-}
-
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(source);
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, true_label);
- __ jmp(false_label);
-}
-
-
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- Comment cmnt(masm_, "[ Declaration");
- Variable* var = decl->proxy()->var();
- ASSERT(var != NULL); // Must have been resolved.
- Slot* slot = var->slot();
- Property* prop = var->AsProperty();
-
- if (slot != NULL) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(slot)));
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(ip);
- __ str(ip, MemOperand(fp, SlotOffset(slot)));
- }
- break;
-
- case Slot::CONTEXT:
- // We bypass the general EmitSlotSearch because we know more about
- // this specific context.
-
- // The variable in the decl always resides in the current context.
- ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
- if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ ldr(r1,
- CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
- }
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
- // No write barrier since the_hole_value is in old space.
- } else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(r0);
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
- // We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, r0);
- }
- break;
-
- case Slot::LOOKUP: {
- __ mov(r2, Operand(var->name()));
- // Declaration nodes are always introduced in one of two modes.
- ASSERT(decl->mode() == Variable::VAR ||
- decl->mode() == Variable::CONST);
- PropertyAttributes attr =
- (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (decl->mode() == Variable::CONST) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- } else if (decl->fun() != NULL) {
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
- Visit(decl->fun()); // Initial value for function decl.
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
- __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-
- } else if (prop != NULL) {
- if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
- // We are declaring a function or constant that rewrites to a
- // property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
-
- if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(r0);
- } else {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- }
-
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Value in r0 is ignored (declarations are statements). Receiver
- // and key on stack are discarded.
- __ Drop(2);
- }
- }
-}
-
-
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
- __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- Comment cmnt(masm_, "[ FunctionLiteral");
-
- // Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate =
- Compiler::BuildBoilerplate(expr, script_, this);
- if (HasStackOverflow()) return;
-
- ASSERT(boilerplate->IsBoilerplate());
-
- // Create a new closure.
- __ mov(r0, Operand(boilerplate));
- __ stm(db_w, sp, cp.bit() | r0.bit());
- __ CallRuntime(Runtime::kNewClosure, 2);
- Apply(expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var(), expr->context());
-}
-
-
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
- Expression* rewrite = var->rewrite();
- if (rewrite == NULL) {
- ASSERT(var->is_global());
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object on the stack.
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndApply(1, context, r0);
- } else if (rewrite->AsSlot() != NULL) {
- Slot* slot = rewrite->AsSlot();
- if (FLAG_debug_code) {
- switch (slot->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL: {
- Comment cmnt(masm_, "Stack slot");
- break;
- }
- case Slot::CONTEXT: {
- Comment cmnt(masm_, "Context slot");
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- break;
- }
- }
- Apply(context, slot, r0);
- } else {
- Comment cmnt(masm_, "Variable rewritten to property");
- // A variable has been rewritten into an explicit access to an object
- // property.
- Property* property = rewrite->AsProperty();
- ASSERT_NOT_NULL(property);
-
- // The only property expressions that can occur are of the form
- // "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->slot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r2, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r1, Operand(key_literal->handle()));
-
- // Push both as arguments to ic.
- __ stm(db_w, sp, r2.bit() | r1.bit());
-
- // Do a keyed property load.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Drop key and object left on the stack by IC, and push the result.
- DropAndApply(2, context, r0);
- }
-}
-
-
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label done;
- // Registers will be used as follows:
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = temp + return value (RegExp literal)
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &done);
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ bind(&done);
- Apply(expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r0, Operand(expr->constant_properties()));
- __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
- }
-
- // If result_saved == true: The result is saved on top of the
- // stack and in r0.
- // If result_saved == false: The result not on the stack, just in r0.
- bool result_saved = false;
-
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
-
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(r0);
- __ mov(r2, Operand(key->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // StoreIC leaves the receiver on the stack.
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
- break;
- }
- // Fall through.
-
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kSetProperty, 3);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
- break;
-
- case ObjectLiteral::Property::GETTER:
- case ObjectLiteral::Property::SETTER:
- __ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
- Smi::FromInt(1) :
- Smi::FromInt(0)));
- __ push(r1);
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ CallRuntime(Runtime::kDefineAccessor, 4);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0
- break;
- }
- }
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->constant_elements()));
- __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
- if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- ZoneList<Expression*>* subexprs = expr->values();
- for (int i = 0, len = subexprs->length(); i < len; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(r0);
- result_saved = true;
- }
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
-
- // Store the subexpression value in the array's elements.
- __ pop(r0); // Subexpression value.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(r0, FieldMemOperand(r1, offset));
-
- // Update the write barrier for the array store with r0 as the scratch
- // register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r0);
- }
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ Drop(1);
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+ // Compile global variable accesses as load IC calls. The only live
+ // registers are cp (context) and possibly r1 (this). Both are also saved
+ // on the stack, and cp is preserved by the call.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(name));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
- Expression::Context context) {
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
- Expression::Context context) {
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(op,
- NO_OVERWRITE);
- __ CallStub(&stub);
- Apply(context, r0);
-}
-
-
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
- ASSERT(var != NULL);
- ASSERT(var->is_global() || var->slot() != NULL);
- if (var->is_global()) {
- // Assignment to a global variable. Use inline caching for the
- // assignment. Right-hand-side value is passed in r0, variable name in
- // r2, and the global object on the stack.
- __ pop(r0);
- __ mov(r2, Operand(var->name()));
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- DropAndApply(1, context, r0);
-
- } else if (var->slot() != NULL) {
- Slot* slot = var->slot();
- switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
- MemOperand target = MemOperand(fp, SlotOffset(slot));
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(r0);
- __ str(r0, target);
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(r0);
- __ str(r0, target);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, target);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ Drop(1);
- __ jmp(true_label_);
- break;
- }
- }
- break;
- }
-
- case Slot::CONTEXT: {
- MemOperand target = EmitSlotSearch(slot, r1);
- __ pop(r0);
- __ str(r0, target);
-
- // RecordWrite may destroy all its register arguments.
- if (context == Expression::kValue) {
- __ push(r0);
- } else if (context != Expression::kEffect) {
- __ mov(r3, r0);
- }
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-
- // Update the write barrier for the array store with r0 as the scratch
- // register. Skip the write barrier if the value written (r1) is a smi.
- // The smi test is part of RecordWrite on other platforms, not on arm.
- Label exit;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
-
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r0);
- __ bind(&exit);
- if (context != Expression::kEffect && context != Expression::kValue) {
- Apply(context, r3);
- }
- break;
- }
-
- case Slot::LOOKUP:
- UNREACHABLE();
- break;
- }
- } else {
- // Variables rewritten as properties are not treated as variables in
- // assignments.
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(r0);
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- }
-
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- // Receiver is under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- }
-
- __ pop(r0);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(r0); // Result of assignment, saved even if not needed.
- // Reciever is under the key and value.
- __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
- __ push(ip);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(r0);
- }
-
- // Receiver and key are still on stack.
- DropAndApply(2, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
-
- // Evaluate receiver.
- Visit(expr->obj());
-
- if (key->IsPropertyName()) {
- // Do a named property load. The IC expects the property name in r2 and
- // the receiver on the stack.
- __ mov(r2, Operand(key->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- DropAndApply(1, expr->context(), r0);
- } else {
- // Do a keyed property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, expr->context(), r0);
- }
-}
-
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, mode);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
- __ CallStub(&stub);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
- Comment cmnt(masm_, "[ Call");
- Expression* fun = expr->expression();
- Variable* var = fun->AsVariableProxy()->AsVariable();
-
- if (var != NULL && var->is_possibly_eval()) {
- // Call to the identifier 'eval'.
- UNREACHABLE();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver for the call IC lookup.
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
- EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- // Call to a lookup slot.
- UNREACHABLE();
- } else if (fun->AsProperty() != NULL) {
- // Call to an object property.
- Property* prop = fun->AsProperty();
- Literal* key = prop->key()->AsLiteral();
- if (key != NULL && key->handle()->IsSymbol()) {
- // Call to a named property, use call IC.
- __ mov(r0, Operand(key->handle()));
- __ push(r0);
- Visit(prop->obj());
- EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
- } else {
- // Call to a keyed property, use keyed load IC followed by function
- // call.
- Visit(prop->obj());
- Visit(prop->key());
- // Record source code position for IC call.
- SetSourcePosition(prop->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Load receiver object into r1.
- if (prop->is_synthetic()) {
- __ ldr(r1, CodeGenerator::GlobalObject());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- } else {
- __ ldr(r1, MemOperand(sp, kPointerSize));
- }
- // Overwrite (object, key) with (function, receiver).
- __ str(r0, MemOperand(sp, kPointerSize));
- __ str(r1, MemOperand(sp));
- EmitCallWithStub(expr);
- }
- } else {
- // Call to some other expression. If the expression is an anonymous
- // function literal not called in a loop, mark it as one that should
- // also use the fast code generator.
- FunctionLiteral* lit = fun->AsFunctionLiteral();
- if (lit != NULL &&
- lit->name()->Equals(Heap::empty_string()) &&
- loop_depth() == 0) {
- lit->set_try_fast_codegen(true);
- }
- Visit(fun);
- // Load global receiver object.
- __ ldr(r1, CodeGenerator::GlobalObject());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- // Emit function call.
- EmitCallWithStub(expr);
- }
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
- // Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
- // Push global object (receiver).
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function, arg_count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- // Function is in sp[arg_count + 1].
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
-
- // Replace function on TOS with result in r0, or pop it.
- DropAndApply(1, expr->context(), r0);
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(r1, Operand(expr->name()));
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ stm(db_w, sp, r1.bit() | r0.bit());
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndApply(1, expr->context(), r0);
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- Apply(expr->context(), r0);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ if (has_this_properties()) {
+ // Restore this.
+ EmitLoadReceiver(r1);
}
}
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- break;
- case Expression::kValue:
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- break;
- case Expression::kTestValue:
- // Value is false so it's needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- case Expression::kTest:
- case Expression::kValueTest:
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true, push_false, done;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- VisitForControl(expr->expression(), &done, &done);
- __ bind(&done);
- break;
-
- case Expression::kValue:
- VisitForControl(expr->expression(), &push_false, &push_true);
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- VisitForControl(expr->expression(), false_label_, true_label_);
- break;
-
- case Expression::kValueTest:
- VisitForControl(expr->expression(), false_label_, &push_true);
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- VisitForControl(expr->expression(), &push_false, true_label_);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(false_label_);
- break;
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+ LookupResult lookup;
+ receiver()->Lookup(*name, &lookup);
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL &&
- !proxy->var()->is_this() &&
- proxy->var()->is_global()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, CodeGenerator::GlobalObject());
- __ push(r0);
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- __ Call(ic, RelocInfo::CODE_TARGET);
- __ str(r0, MemOperand(sp));
- } else if (proxy != NULL &&
- proxy->var()->slot() != NULL &&
- proxy->var()->slot()->type() == Slot::LOOKUP) {
- __ mov(r0, Operand(proxy->name()));
- __ stm(db_w, sp, cp.bit() | r0.bit());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- __ push(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
- }
-
- __ CallRuntime(Runtime::kTypeof, 1);
- Apply(expr->context(), r0);
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ ASSERT(lookup.holder() == *receiver());
+ ASSERT(lookup.type() == FIELD);
+ Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ int offset = index * kPointerSize;
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
- Expression::kValue);
+ // Negative offsets are inobject properties.
+ if (offset < 0) {
+ offset += map->instance_size();
+ __ mov(r2, r1); // Copy receiver for write barrier.
} else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && expr->context() != Expression::kEffect) {
- ASSERT(expr->context() != Expression::kUninitialized);
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- if (assign_type == NAMED_PROPERTY) {
- EmitNamedPropertyLoad(prop, Expression::kValue);
- } else {
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- EmitKeyedPropertyLoad(prop, Expression::kValue);
- }
- }
-
- // Convert to number.
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Do not save result.
- break;
- case Expression::kValue:
- case Expression::kTest:
- case Expression::kTestValue:
- case Expression::kValueTest:
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- break;
- }
- }
-
- // Call runtime for +1/-1.
- if (expr->op() == Token::INC) {
- __ mov(ip, Operand(Smi::FromInt(1)));
- } else {
- __ mov(ip, Operand(Smi::FromInt(-1)));
- }
- __ stm(db_w, sp, ip.bit() | r0.bit());
- __ CallRuntime(Runtime::kNumberAdd, 2);
-
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- if (expr->is_postfix()) {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Expression::kEffect);
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- expr->context());
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (expr->is_postfix()) {
- __ Drop(1); // Result is on the stack under the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(1, expr->context(), r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- if (expr->is_postfix()) {
- __ Drop(2); // Result is on the stack under the key and the receiver.
- if (expr->context() != Expression::kEffect) {
- ApplyTOS(expr->context());
- }
- } else {
- DropAndApply(2, expr->context(), r0);
- }
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- switch (expr->op()) {
- case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- break;
-
- case Token::OR:
- case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE);
- __ CallStub(&stub);
- Apply(expr->context(), r0);
-
- break;
- }
- default:
- UNREACHABLE();
+ offset += FixedArray::kHeaderSize;
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
}
+ // Perform the store.
+ __ str(r0, FieldMemOperand(r2, offset));
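+ // Update the write barrier so the GC sees the new value; RecordWrite
+ // may clobber all three of its register arguments.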
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, ip);
}
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label push_true, push_false, done;
- // Initially assume we are in a test context.
- Label* if_true = true_label_;
- Label* if_false = false_label_;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
- case Expression::kEffect:
- if_true = &done;
- if_false = &done;
- break;
- case Expression::kValue:
- if_true = &push_true;
- if_false = &push_false;
- break;
- case Expression::kTest:
- break;
- case Expression::kValueTest:
- if_true = &push_true;
- break;
- case Expression::kTestValue:
- if_false = &push_false;
- break;
- }
-
- switch (expr->op()) {
- case Token::IN: {
- __ InvokeBuiltin(Builtins::IN, CALL_JS);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ jmp(if_false);
- break;
- }
-
- case Token::INSTANCEOF: {
- InstanceofStub stub;
- __ CallStub(&stub);
- __ tst(r0, r0);
- __ b(eq, if_true); // The stub returns 0 for true.
- __ jmp(if_false);
- break;
- }
-
- default: {
- Condition cc = eq;
- bool strict = false;
- switch (expr->op()) {
- case Token::EQ_STRICT:
- strict = true;
- // Fall through
- case Token::EQ:
- cc = eq;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::LT:
- cc = lt;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = lt;
- __ pop(r1);
- __ pop(r0);
- break;
- case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
- cc = ge;
- __ pop(r1);
- __ pop(r0);
- break;
- case Token::GTE:
- cc = ge;
- __ pop(r0);
- __ pop(r1);
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
-
- // The comparison stub expects the smi vs. smi case to be handled
- // before it is called.
- Label slow_case;
- __ orr(r2, r0, Operand(r1));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &slow_case);
- __ cmp(r1, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
-
- __ bind(&slow_case);
- CompareStub stub(cc, strict);
- __ CallStub(&stub);
- __ tst(r0, r0);
- __ b(cc, if_true);
- __ jmp(if_false);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(true_label_);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(false_label_);
- break;
- }
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Apply(expr->context(), r0);
-}
-
-
-Register FastCodeGenerator::result_register() { return r0; }
-
-
-Register FastCodeGenerator::context_register() { return cp; }
-
-
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
+void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
+ ASSERT(function_ == NULL);
+ ASSERT(info_ == NULL);
+ function_ = fun;
+ info_ = info;
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
-}
+ // Save the caller's frame pointer and set up our own.
+ Comment prologue_cmnt(masm(), ";; Prologue");
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize));
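+ // The frame now has the standard JS layout: the function and the
+ // context sit below fp, the caller's fp and return address at and
+ // above it.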
+ // Note that we keep a live register reference to cp (context) at
+ // this point.
+ // Receiver (this) is allocated to r1 if there are this properties.
+ if (has_this_properties()) EmitReceiverMapCheck();
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
+ VisitStatements(fun->body());
-void FastCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- ASSERT_EQ(0, kSmiTag);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
- __ push(r1);
-}
+ Comment return_cmnt(masm(), ";; Return(<undefined>)");
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ Comment epilogue_cmnt(masm(), ";; Epilogue");
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
-void FastCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore result register from stack.
- __ pop(r1);
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
+ __ bind(&bailout_);
}
#undef __
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
new file mode 100644
index 000000000..9f240dd82
--- /dev/null
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -0,0 +1,1781 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
+#include "full-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o r1: the JS function object being called (i.e., ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
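+//
+// For example, a call f(a, b) arrives here with the receiver, a, and b
+// pushed in that order, so sp points at the last argument b.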
+void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
+ function_ = fun;
+ SetFunctionPosition(fun);
+
+ if (mode == PRIMARY) {
+ int locals_count = fun->scope()->num_stack_slots();
+
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+
+ { Comment cmnt(masm_, "[ Allocate locals");
+ for (int i = 0; i < locals_count; i++) {
+ __ push(ip);
+ }
+ }
+
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, r1, r0);
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // r1 was clobbered when the context was allocated; reload the function.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
+ }
+
+ // Check the stack for overflow or break request.
+ // Set up lr ahead of the conditional jump: reading pc yields the address
+ // of the current instruction plus an implicit 8 bytes, so adding
+ // kInstrSize gives a return address 12 bytes down, just past the
+ // conditional jump below.
+ { Comment cmnt(masm_, "[ Stack check");
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
+ }
+
+ { Comment cmnt(masm_, "[ return <undefined>;");
+ // Emit a 'return undefined' in case control fell off the end of the
+ // body.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence(function_->end_position());
+}
+
+
+void FullCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns its parameter in r0.
+ __ push(r0);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int num_parameters = function_->scope()->num_parameters();
+ int32_t sp_delta = (num_parameters + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
+ CodeGenerator::RecordPositions(masm_, position);
+ __ RecordJSReturn();
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ add(sp, sp, Operand(sp_delta));
+ __ Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values require
+ // use of an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+
+ case Expression::kValue:
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Push an extra copy of the value in case it's needed.
+ __ push(reg);
+ // Fall through.
+
+ case Expression::kTest:
+ // We always call the runtime on ARM, so push the value as argument.
+ __ push(reg);
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ Move(result_register(), slot);
+ Apply(context, result_register());
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ __ mov(result_register(), Operand(lit->handle()));
+ Apply(context, result_register());
+ break;
+ }
+}
+
+
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Duplicate the value on the stack in case it's needed.
+ __ ldr(ip, MemOperand(sp));
+ __ push(ip);
+ // Fall through.
+
+ case Expression::kTest:
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(sp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
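+ // Leave two copies of reg on the stack: one for DoTest to consume
+ // and one as the value of the expression.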
+ if (count == 1) {
+ __ str(reg, MemOperand(sp));
+ __ push(reg);
+ } else { // count > 1
+ __ Drop(count - 2);
+ __ str(reg, MemOperand(sp, kPointerSize));
+ __ str(reg, MemOperand(sp));
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ __ bind(&done);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FullCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is pushed on the stack, and duplicated on the stack
+ // if necessary (for value/test and test/value contexts).
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
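+ // The eq condition now holds exactly when the value is true.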
+
+ // Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest: {
+ Label discard;
+ switch (location_) {
+ case kAccumulator:
+ __ b(ne, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ b(eq, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Expression::kTestValue: {
+ Label discard;
+ switch (location_) {
+ case kAccumulator:
+ __ b(eq, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ b(ne, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
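+ // Dead code, included only to silence the compiler.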
+ return MemOperand(r0, 0);
+}
+
+
+void FullCodeGenerator::Move(Register destination, Slot* source) {
+ // Use destination as scratch.
+ MemOperand slot_operand = EmitSlotSearch(source, destination);
+ __ ldr(destination, slot_operand);
+}
+
+
+void FullCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ str(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
+ __ RecordWrite(scratch1, scratch2, src);
+ }
+}
+
+
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(slot)));
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(),
+ CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ mov(r1, Operand(cp));
+ __ RecordWrite(r1, r2, result_register());
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ // Push initial value for function declaration.
+ VisitForValue(decl->fun(), kStack);
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+
+ if (decl->fun() != NULL) {
+ VisitForValue(decl->fun(), kAccumulator);
+ } else {
+ __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ Drop(2);
+ }
+ }
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ // The context is the first argument.
+ __ mov(r1, Operand(pairs));
+ __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+ __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
+ if (HasStackOverflow()) return;
+
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Create a new closure.
+ __ mov(r0, Operand(boilerplate));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kNewClosure, 2);
+ Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ // Four cases: non-this global variables, lookup slots, all other
+ // types of slots, and parameters that rewrite to explicit property
+ // accesses on the arguments object.
+ Slot* slot = var->slot();
+ Property* property = var->AsProperty();
+
+ if (var->is_global() && !var->is_this()) {
+ Comment cmnt(masm_, "Global variable");
+ // Use inline caching. Variable name is passed in r2 and the global
+ // object on the stack.
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ __ mov(r2, Operand(var->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ DropAndApply(1, context, r0);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ Comment cmnt(masm_, "Lookup slot");
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ Apply(context, r0);
+
+ } else if (slot != NULL) {
+ Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+ ? "Context slot"
+ : "Stack slot");
+ Apply(context, slot);
+
+ } else {
+ Comment cmnt(masm_, "Rewritten parameter");
+ ASSERT_NOT_NULL(property);
+ // Rewritten parameter accesses are of the form "slot[literal]".
+
+ // Assert that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(r2, object_slot);
+
+ // Assert that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ mov(r1, Operand(key_literal->handle()));
+
+ // Push both as arguments to ic.
+ __ stm(db_w, sp, r2.bit() | r1.bit());
+
+ // Do a keyed property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndApply(2, context, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Label done;
+ // Registers will be used as follows:
+ // r4 = JS function, literals array
+ // r3 = literal index
+ // r2 = RegExp pattern
+ // r1 = RegExp flags
+ // r0 = temp + return value (RegExp literal)
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r4, literal_offset));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(ne, &done);
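+ // First execution: materialize the RegExp object and cache it in the
+ // literals array.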
+ __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r2, Operand(expr->pattern()));
+ __ mov(r1, Operand(expr->flags()));
+ __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+ __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ bind(&done);
+ Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r0, Operand(expr->constant_properties()));
+ __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
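+ // Literals nested more than one level deep need the general runtime
+ // call; shallow ones can use the faster clone.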
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ }
+
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in r0.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ VisitForValue(value, kAccumulator);
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ break;
+ }
+ // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ break;
+ case ObjectLiteral::Property::GETTER:
+ case ObjectLiteral::Property::SETTER:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
+ __ push(r0);
+ VisitForValue(key, kStack);
+ __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(r1);
+ VisitForValue(value, kStack);
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ break;
+ }
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r1, Operand(expr->constant_elements()));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ }
+
+ bool result_saved = false; // Is the result saved to the stack?
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ ZoneList<Expression*>* subexprs = expr->values();
+ for (int i = 0, len = subexprs->length(); i < len; i++) {
+ Expression* subexpr = subexprs->at(i);
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (subexpr->AsLiteral() != NULL ||
+ CompileTimeValue::IsCompileTimeValue(subexpr)) {
+ continue;
+ }
+
+ if (!result_saved) {
+ __ push(r0);
+ result_saved = true;
+ }
+ VisitForValue(subexpr, kAccumulator);
+
+ // Store the subexpression value in the array's elements.
+ __ ldr(r1, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ str(result_register(), FieldMemOperand(r1, offset));
+
+ // Update the write barrier for the array store with r0 as the scratch
+ // register.
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r1, r2, result_register());
+ }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
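+ // The right operand is already in r0 (the accumulator); pop the left
+ // operand into r1, the order GenericBinaryOpStub expects.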
+ __ pop(r1);
+ GenericBinaryOpStub stub(op, NO_OVERWRITE);
+ __ CallStub(&stub);
+ Apply(context, r0);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
+ // Three main cases: global variables, lookup slots, and all other
+ // types of slots. Left-hand-side parameters that rewrite to
+ // explicit property accesses do not reach here.
+ ASSERT(var != NULL);
+ ASSERT(var->is_global() || var->slot() != NULL);
+
+ Slot* slot = var->slot();
+ if (var->is_global()) {
+ ASSERT(!var->is_this());
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in r0, variable name in
+ // r2, and the global object on the stack.
+ __ mov(r2, Operand(var->name()));
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Overwrite the global object on the stack with the result if needed.
+ DropAndApply(1, context, r0);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ __ push(result_register()); // Value.
+ __ mov(r1, Operand(var->name()));
+ __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ Apply(context, r0);
+
+ } else if (var->slot() != NULL) {
+ Slot* slot = var->slot();
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER:
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+
+ case Slot::CONTEXT: {
+ MemOperand target = EmitSlotSearch(slot, r1);
+ __ str(result_register(), target);
+
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r1, r2, r3);
+ break;
+ }
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
+ }
+ Apply(context, result_register());
+
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(result_register());
+ // Receiver is now under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
+ }
+
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ // Receiver and key are still on stack.
+ DropAndApply(2, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+
+ // Evaluate receiver.
+ VisitForValue(expr->obj(), kStack);
+
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, r0);
+ } else {
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, mode);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
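+ // Functions containing such calls are rejected up front and compiled
+ // with the classic code generator, so they never reach this point.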
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ mov(r1, Operand(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ mov(r0, Operand(key->handle()));
+ __ push(r0);
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Load receiver object into r1.
+ if (prop->is_synthetic()) {
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ } else {
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ }
+ // Overwrite (object, key) with (function, receiver).
+ __ str(r0, MemOperand(sp, kPointerSize));
+ __ str(r1, MemOperand(sp));
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the full code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_full_codegen(true);
+ }
+ VisitForValue(fun, kStack);
+ // Load global receiver object.
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ push(r1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ VisitForValue(expr->expression(), kStack);
+
+ // Push global object (receiver).
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into r1 and r0.
+ __ mov(r0, Operand(arg_count));
+ // Function is in sp[arg_count + 1].
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in r0, or pop it.
+ DropAndApply(1, context_, r0);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+ Comment cmnt(masm_, "[ CallRuntime");
+ ZoneList<Expression*>* args = expr->arguments();
+
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(r1, Operand(expr->name()));
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForValue(args->at(i), kStack);
+ }
+
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndApply(1, context_, r0);
+ } else {
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Apply(context_, r0);
+ }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ // undefined is always false, and a test/value context needs the
+ // value when it is false.
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_false = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_true = &materialize_false;
+ break;
+ }
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
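+ // Overwrite the pushed global object with the value loaded by the IC.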
+ __ str(r0, MemOperand(sp));
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
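+ // Push the context and the variable name as arguments to the lookup call.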
+ __ mov(r0, Operand(proxy->name()));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ VisitForValue(expr->expression(), kStack);
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::ADD: {
+ Comment cmt(masm_, "[ UnaryOperation (ADD)");
+ VisitForValue(expr->expression(), kAccumulator);
+ Label no_conversion;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
+ Apply(context_, result_register());
+ break;
+ }
+
+ case Token::SUB: {
+ Comment cmt(masm_, "[ UnaryOperation (SUB)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ __ CallStub(&stub);
+ Apply(context_, r0);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+ Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
+ bool overwrite =
+ (expr->expression()->AsBinaryOperation() != NULL &&
+ expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ // GenericUnaryOpStub expects the argument to be in the
+ // accumulator register r0.
+ VisitForValue(expr->expression(), kAccumulator);
+ // Avoid calling the stub for Smis.
+ Label smi, done;
+ __ tst(result_register(), Operand(kSmiTagMask));
+ __ b(eq, &smi);
+ // Non-smi: call stub leaving result in accumulator register.
+ __ CallStub(&stub);
+ __ b(&done);
+ // Perform operation directly on Smis.
+ __ bind(&smi);
+ __ mvn(result_register(), Operand(result_register()));
+ // Bit-clear inverted smi-tag.
+ __ bic(result_register(), result_register(), Operand(kSmiTagMask));
+ __ bind(&done);
+ Apply(context_, result_register());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ // Expression can only be a property, a global, or a (parameter or local)
+ // slot. Variables whose rewrite is an .arguments access are treated as
+ // KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ __ push(ip);
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ }
+
+ // Call ToNumber only if operand is not a smi.
+ Label no_conversion;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &no_conversion);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+ __ bind(&no_conversion);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver (and, for keyed properties,
+ // the key) that is currently on top of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ break;
+ }
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ if (loop_depth() > 0) {
+ __ add(r0, r0, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ __ b(vs, &stub_call);
+ // We could eliminate this smi check if we split the code at
+ // the first smi check before calling ToNumber.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &done);
+ __ bind(&stub_call);
+ // Call stub. Undo operation first.
+ __ sub(r0, r0, Operand(r1));
+ }
+ __ mov(r1, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ __ CallStub(&stub);
+ __ bind(&done);
+
+ // Store the value returned in r0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect the result is on top of the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(1, context_, r0);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, r0);
+ }
+ break;
+ }
+ }
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ VisitForEffect(expr->left());
+ Visit(expr->right());
+ break;
+
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ if_true = &materialize_true;
+ break;
+ case Expression::kTestValue:
+ if_false = &materialize_false;
+ break;
+ }
+
+ VisitForValue(expr->left(), kStack);
+ switch (expr->op()) {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
+ __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, if_true);
+ __ jmp(if_false);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(eq, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
+ break;
+ }
+
+ default: {
+ VisitForValue(expr->right(), kAccumulator);
+ Condition cc = eq;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = eq;
+ __ pop(r1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ pop(r1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ mov(r1, result_register());
+ __ pop(r0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ mov(r1, result_register());
+ __ pop(r0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ pop(r1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
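+ // Or the operands together: smis are tagged with zero, so the result
+ // has the smi tag bit set iff at least one operand is a heap object.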
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(cc, if_true);
+ __ jmp(if_false);
+
+ __ bind(&slow_case);
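+ // The stub returns a value that compares against zero with the same
+ // condition as the original comparison.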
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ cmp(r0, Operand(0));
+ __ b(cc, if_true);
+ __ jmp(if_false);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Apply(context_, r0);
+}
+
+
+Register FullCodeGenerator::result_register() { return r0; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+ // Cook the return address in lr to the stack (smi-encoded Code* delta).
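+ // The smi-encoded delta is ignored by the GC and stays valid if the
+ // code object moves.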
+ __ sub(r1, lr, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ push(r1);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Restore result register from stack.
+ __ pop(r1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ add(pc, r1, Operand(masm_->CodeObject()));
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index a1f26130a..bae1e9679 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -170,7 +170,6 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
__ ldr(r0, MemOperand(sp, 0));
@@ -204,7 +203,6 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss;
// Load receiver.
@@ -318,7 +316,6 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
-
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1.
@@ -451,7 +448,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
-
Label miss, probe, global;
__ ldr(r0, MemOperand(sp, 0));
@@ -543,6 +539,8 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
@@ -555,6 +553,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
Label slow, fast;
// Get the key and receiver object from the stack.
@@ -569,11 +568,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
+
+ // Check the bit field: go to the slow case if the receiver needs
+ // access checks or has an indexed interceptor.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ tst(r3, Operand(kSlowCaseBitFieldMask));
__ b(ne, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -623,6 +621,8 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
+ // -----------------------------------
+
GenerateGeneric(masm);
}
@@ -641,6 +641,7 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
@@ -655,7 +656,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
+ // -----------------------------------
Label slow, fast, array, extra, exit;
+
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
@@ -807,7 +810,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ GenerateMiss(masm);
}
@@ -828,7 +831,7 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
-void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
@@ -840,7 +843,7 @@ void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
// Perform tail call to the entry.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 18cadaca3..b39404e7f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -205,6 +205,11 @@ void MacroAssembler::LoadRoot(Register destination,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register offset,
Register scratch) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
@@ -272,6 +277,14 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
str(scratch, MemOperand(object));
bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ mov(object, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
+ mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
+ }
}
@@ -1035,9 +1048,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
return;
}
- Runtime::FunctionId function_id =
- static_cast<Runtime::FunctionId>(f->stub_id);
- RuntimeStub stub(function_id, num_arguments);
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ mov(r1, Operand(ExternalReference(f)));
+ CEntryStub stub(1);
CallStub(&stub);
}
@@ -1221,6 +1238,46 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
+ and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
+ cmp(scratch1, Operand(kFlatAsciiStringTag));
+ // Ignore second test if first test failed.
+ cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+ b(ne, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ ASSERT_EQ(0, kSmiTag);
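+ // And the objects together: the smi tag bit of the result is clear if
+ // either object is a smi.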
+ and_(scratch1, first, Operand(second));
+ tst(scratch1, Operand(kSmiTagMask));
+ b(eq, failure);
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 8f2064a74..efc5bfae7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -337,6 +337,25 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ // ---------------------------------------------------------------------------
+ // String utilities
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not. Assumes that neither object is a smi.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
+ Register object2,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Checks if both objects are sequential ASCII strings and jumps to label
+ // if either is not.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* not_flat_ascii_strings);
+
private:
List<Unresolved> unresolved_;
bool generating_stub_;
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 5ea775104..9dd3b9326 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -63,8 +63,6 @@ namespace internal {
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, we are starting at the start of the
- * string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
@@ -76,6 +74,8 @@ namespace internal {
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
+ * - At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
@@ -526,64 +526,54 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return true;
}
case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ eor(r0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(r0, r0, Operand(0x0b));
+ __ cmp(r0, Operand(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ Label done;
+ __ b(ls, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ __ cmp(r0, Operand(1));
+ BranchOrBacktrack(hi, on_no_match);
+ __ bind(&done);
}
+ return true;
+ }
case 'w': {
- // Match word character (0-9, A-Z, a-z and _).
- Label digits, done;
- __ cmp(current_character(), Operand('9'));
- __ b(ls, &digits);
- __ cmp(current_character(), Operand('_'));
- __ b(eq, &done);
- __ orr(r0, current_character(), Operand(0x20));
- __ sub(r0, r0, Operand('a'));
- __ cmp(r0, Operand('z' - 'a'));
- BranchOrBacktrack(hi, on_no_match);
- __ jmp(&done);
-
- __ bind(&digits);
- __ cmp(current_character(), Operand('0'));
- BranchOrBacktrack(lo, on_no_match);
- __ bind(&done);
-
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ BranchOrBacktrack(hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
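+ // A zero entry means the current character is not a word character.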
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(eq, on_no_match);
return true;
}
case 'W': {
- // Match non-word character (not 0-9, A-Z, a-z and _).
- Label digits, done;
- __ cmp(current_character(), Operand('9'));
- __ b(ls, &digits);
- __ cmp(current_character(), Operand('_'));
- BranchOrBacktrack(eq, on_no_match);
- __ orr(r0, current_character(), Operand(0x20));
- __ sub(r0, r0, Operand('a'));
- __ cmp(r0, Operand('z' - 'a'));
- BranchOrBacktrack(ls, on_no_match);
- __ jmp(&done);
-
- __ bind(&digits);
- __ cmp(current_character(), Operand('0'));
- BranchOrBacktrack(hs, on_no_match);
- __ bind(&done);
-
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ __ b(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(ne, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
return true;
}
case '*':
@@ -620,6 +610,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer just above the arguments.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -663,6 +654,15 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Store this value in a local variable, for use when clearing
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+ // Determine whether the start index is zero, that is, whether we are at
+ // the start of the string, and store that value in a local variable.
+ __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
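+ // Set r1 to 1 if the start index is zero and to 0 otherwise.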
+ __ tst(r1, Operand(r1));
+ __ mov(r1, Operand(1), LeaveCC, eq);
+ __ mov(r1, Operand(0), LeaveCC, ne);
+ __ str(r1, MemOperand(frame_pointer(), kAtStart));
+
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 4459859a3..7de5f93d7 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -123,8 +123,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
- static const int kAtStart = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
@@ -136,8 +135,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index f3927720f..f5431512f 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -47,9 +47,9 @@ using ::v8::internal::ReadLine;
using ::v8::internal::DeleteArray;
// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated ARM
@@ -355,6 +355,10 @@ void Debugger::Debug() {
} else {
PrintF("Not at debugger stop.");
}
+ } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
PrintF(" continue execution (alias 'c')\n");
@@ -378,7 +382,9 @@ void Debugger::Debug() {
PrintF(" delete the breakpoint\n");
PrintF("unstop\n");
PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF(" from now on\n");
+ PrintF("trace (alias 't')\n");
+ PrintF(" toogle the tracing of all executed statements");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@@ -890,8 +896,13 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ if (isnan(val1) || isnan(val2)) {
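+ // Unordered comparison: the ARM VFP sets N=0, Z=0, C=1, V=1.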
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = true;
// All non-NaN cases.
- if (val1 == val2) {
+ } else if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
c_flag_FPSCR_ = true;
@@ -2022,42 +2033,62 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
+ if (instr->CoprocessorField() != 0xB) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ switch (instr->OpcodeField()) {
+ case 0x2:
+ // Load and store double to two GP registers
+ if (instr->Bits(7, 4) != 0x1) {
+ UNIMPLEMENTED(); // Not used by V8.
+ } else {
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+ if (instr->HasL()) {
+ int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+ int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
- if (instr->Bit(23) == 1) {
- UNIMPLEMENTED();
- } else if (instr->Bit(22) == 1) {
- if ((instr->Bits(27, 24) == 0xC) &&
- (instr->Bit(22) == 1) &&
- (instr->Bits(11, 8) == 0xB) &&
- (instr->Bits(7, 6) == 0x0) &&
- (instr->Bit(4) == 1)) {
- if (instr->Bit(20) == 0) {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
-
- } else if (instr->Bit(20) == 1) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ } else {
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+ set_s_register_from_sinteger(2*vm, rs_val);
+ set_s_register_from_sinteger((2*vm+1), rn_val);
+ }
+ }
+ break;
+ case 0x8:
+ case 0xC: { // Load and store double to memory.
+ int rn = instr->RnField();
+ int vd = instr->VdField();
+ int offset = instr->Immed8Field();
+ if (!instr->HasU()) {
+ offset = -offset;
+ }
+ int32_t address = get_register(rn) + 4 * offset;
+ if (instr->HasL()) {
+ // Load double from memory: vldr.
+ set_s_register_from_sinteger(2*vd, ReadW(address, instr));
+ set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ } else {
+ // Store double to memory: vstr.
+ WriteW(address, get_sinteger_from_s_register(2*vd), instr);
+ WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ }
+ break;
}
- } else {
- UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
}
- } else if (instr->Bit(21) == 1) {
- UNIMPLEMENTED();
- } else {
- UNIMPLEMENTED();
}
}
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 3ce5b7a6b..19737301a 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
+ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 958842d2c..d19a683dc 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -362,6 +362,369 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+static void GenerateCallFunction(MacroAssembler* masm,
+ Object* object,
+ const ParameterCount& arguments,
+ Label* miss) {
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- r1: function to call
+ // -----------------------------------
+
+ // Check that the function really is a function.
+ __ BranchOnSmi(r1, miss);
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
+ }
+
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallConstFunction(MacroAssembler* masm,
+ JSFunction* function,
+ const ParameterCount& arguments) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ __ mov(r1, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
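+ // Take the inlined fast path when the lookup result is cacheable;
+ // otherwise fall back to a plain interceptor call.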
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ ASSERT(!Heap::InNewSpace(interceptor));
+
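+ // The receiver was pushed above, so its register can serve as a scratch.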
+ Register scratch = receiver;
+ __ mov(scratch, Operand(Handle<Object>(interceptor)));
+ __ push(scratch);
+ __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+ __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
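+ // CEntryStub expects the argument count in r0 and the entry address in r1.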
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so only those are inlined here; other cases may be
+ // added later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ r0,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
+ __ push(scratch1);
+ __ push(name_);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5, 1);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5, 1);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most popular case after a failed interceptor lookup is a
+ // CONSTANT_FUNCTION sitting behind it.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Constant functions cannot sit on the global object.
+ ASSERT(!lookup->holder()->IsGlobalObject());
+
+ __ EnterInternalFrame();
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ASSERT(!r0.is(name_));
+ ASSERT(!r0.is(scratch1));
+ __ pop(name_); // Restore the name.
+ __ pop(scratch1); // Restore the holder.
+ __ LeaveInternalFrame();
+
+ // Compare with no_interceptor_result_sentinel.
+ __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch2);
+ Label invoke;
+ __ b(ne, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, scratch1,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ GenerateCallConstFunction(masm, function, arguments_);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ mov(r0, Operand(5));
+ __ mov(r1, Operand(ref));
+
+ CEntryStub stub(1);
+ __ CallStub(&stub);
+
+ // Restore the name_ register.
+ __ pop(name_);
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+ Register name_;
+};
+
+
#undef __
#define __ ACCESS_MASM(masm())
@@ -491,30 +854,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
-
- InterceptorInfo* interceptor = holder->GetNamedInterceptor();
- ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(scratch1, Operand(Handle<Object>(interceptor)));
- __ push(scratch1);
- __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
- __ push(scratch2);
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallRuntime(load_ic_property, 5, 1);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -572,22 +923,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
- // Check that the function really is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
- // Get the map.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Invoke the function.
- __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -637,50 +973,65 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a two-byte string or a symbol.
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(hs, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &fast);
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(eq, &fast);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
@@ -700,16 +1051,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
UNREACHABLE();
}
- // Get the function and setup the context.
- __ mov(r1, Operand(Handle<JSFunction>(function)));
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- ASSERT(function->is_compiled());
- Handle<Code> code(function->code());
- ParameterCount expected(function->shared()->formal_parameter_count());
- __ InvokeCode(code, expected, arguments(),
- RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+ GenerateCallConstFunction(masm(), function, arguments());
// Handle call cache miss.
__ bind(&miss);
@@ -733,7 +1075,34 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// -----------------------------------
Label miss;
- // TODO(1224669): Implement.
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack into r0.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ // Load the name from the stack into r1.
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments(), r1);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ r0,
+ r2,
+ r3,
+ &miss);
+
+ // Restore receiver.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+ GenerateCallFunction(masm(), object, arguments(), &miss);
// Handle call cache miss.
__ bind(&miss);
@@ -906,7 +1275,6 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -958,7 +1326,6 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Handle store cache miss.
__ bind(&miss);
- __ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1084,7 +1451,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
holder,
&lookup,
@@ -1250,7 +1617,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ b(ne, &miss);
LookupResult lookup;
- holder->LocalLookupRealNamedProperty(name, &lookup);
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
&lookup,
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index a33ebd420..7a8ac7266 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -219,36 +219,15 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
-void VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
- PrepareForCall(0, 0);
- arg->Unuse();
- RawCallStub(stub);
-}
-
-
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- PrepareForCall(0, 0);
- arg0->Unuse();
- arg1->Unuse();
- RawCallStub(stub);
-}
-
-
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
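+ // The arguments are consumed from the actual stack by the call, so
+ // just forget them in the virtual frame.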
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
@@ -257,102 +236,34 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
- PrepareForCall(arg_count, arg_count);
+ Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
-}
-
-
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
- int spilled_args = 0;
switch (code->kind()) {
case Code::CALL_IC:
- spilled_args = dropped_args + 1;
- break;
case Code::FUNCTION:
- spilled_args = dropped_args + 1;
break;
case Code::KEYED_LOAD_IC:
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // The other types of code objects are called with values
- // in specific registers, and are handled in functions with
- // a different signature.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
- int spilled_args = 0;
- switch (code->kind()) {
case Code::LOAD_IC:
- ASSERT(arg->reg().is(r2));
- ASSERT(dropped_args == 0);
- spilled_args = 1;
- break;
case Code::KEYED_STORE_IC:
- ASSERT(arg->reg().is(r0));
- ASSERT(dropped_args == 0);
- spilled_args = 2;
- break;
- default:
- // No other types of code objects are called with values
- // in exactly one register.
- UNREACHABLE();
- break;
- }
- PrepareForCall(spilled_args, dropped_args);
- arg->Unuse();
- RawCallCodeObject(code, rmode);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args) {
- int spilled_args = 1;
- switch (code->kind()) {
case Code::STORE_IC:
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r2));
ASSERT(dropped_args == 0);
- spilled_args = 1;
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
- ASSERT(arg0->reg().is(r0));
- ASSERT(arg1->reg().is(r1));
- spilled_args = dropped_args + 1;
break;
default:
- // No other types of code objects are called with values
- // in exactly two registers.
UNREACHABLE();
break;
}
- PrepareForCall(spilled_args, dropped_args);
- arg0->Unuse();
- arg1->Unuse();
- RawCallCodeObject(code, rmode);
+ Forget(dropped_args);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index b2f0eea60..9a2f7d360 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -287,18 +287,11 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
+ Forget(arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ masm()->CallStub(stub);
}
- // Call stub that expects its argument in r0. The argument is given
- // as a result which must be the register r0.
- void CallStub(CodeStub* stub, Result* arg);
-
- // Call stub that expects its arguments in r1 and r0. The arguments
- // are given as results which must be the appropriate registers.
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
@@ -311,19 +304,10 @@ class VirtualFrame : public ZoneObject {
int arg_count);
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
+ // from the stack. Register arguments to the IC stub are implicit,
+ // and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
@@ -511,14 +495,6 @@ class VirtualFrame : public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.