Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h           |   3
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc              |   7
-rw-r--r--  deps/v8/src/arm/assembler-arm.h               |   9
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc               |   5
-rw-r--r--  deps/v8/src/arm/codegen-arm-inl.h             |  46
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                | 586
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                 |  11
-rw-r--r--  deps/v8/src/arm/constants-arm.h               |   3
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc                    |   3
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                  |   3
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc                 |   3
-rw-r--r--  deps/v8/src/arm/frames-arm.cc                 |   3
-rw-r--r--  deps/v8/src/arm/frames-arm.h                  |   3
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                     |  13
-rw-r--r--  deps/v8/src/arm/jump-target-arm.cc            | 147
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc        |  28
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h         |   3
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc |   3
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h  |   3
-rw-r--r--  deps/v8/src/arm/register-allocator-arm-inl.h  | 103
-rw-r--r--  deps/v8/src/arm/register-allocator-arm.cc     |  55
-rw-r--r--  deps/v8/src/arm/register-allocator-arm.h      |  43
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc              |   3
-rw-r--r--  deps/v8/src/arm/simulator-arm.h               |   3
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc             |  44
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc          |  77
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h           | 198
27 files changed, 850 insertions(+), 558 deletions(-)
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index fe64761e3..824a5fda5 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -41,7 +41,8 @@
#include "cpu.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 191c865a8..6ec8f460b 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -39,7 +39,8 @@
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -211,6 +212,7 @@ enum {
// Instruction bit masks
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
@@ -616,7 +618,8 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// unindexed addressing is not encoded by this function
- ASSERT((instr & ~(CondMask | P | U | N | W | L)) == (B27 | B26));
+ ASSERT_EQ((B27 | B26),
+ (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index d7535e0da..eeab4a72c 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -42,7 +42,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// CPU Registers.
//
@@ -83,8 +84,6 @@ struct Register {
};
-const int kNumRegisters = 16;
-
extern Register no_reg;
extern Register r0;
extern Register r1;
@@ -622,8 +621,8 @@ class Assembler : public Malloced {
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
- void push(Register src) {
- str(src, MemOperand(sp, 4, NegPreIndex), al);
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
}
void pop(Register dst) {
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 9c7a42ab1..588798bde 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -31,7 +31,8 @@
#include "debug.h"
#include "runtime.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -187,7 +188,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
new file mode 100644
index 000000000..544331a52
--- /dev/null
+++ b/deps/v8/src/arm/codegen-arm-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ARM_CODEGEN_ARM_INL_H_
+#define V8_ARM_CODEGEN_ARM_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 1930a7c2f..7428d3b59 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -36,10 +36,39 @@
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm_)
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ // Restore registers in reverse order because pushed registers are popped LIFO.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -108,7 +137,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame(this);
+ frame_ = new VirtualFrame();
cc_reg_ = al;
set_in_spilled_code(false);
{
@@ -133,13 +162,13 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
#endif
// Allocate space for locals and initialize them.
- frame_->AllocateStackSlots(scope_->num_stack_slots());
+ frame_->AllocateStackSlots();
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
- function_return_.Initialize(this, JumpTarget::BIDIRECTIONAL);
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false;
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (scope_->num_heap_slots() > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -148,7 +177,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
#ifdef DEBUG
- JumpTarget verified_true(this);
+ JumpTarget verified_true;
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
@@ -288,9 +317,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
DeleteFrame();
// Process any deferred code using the register allocator.
- if (HasStackOverflow()) {
- ClearDeferred();
- } else {
+ if (!HasStackOverflow()) {
ProcessDeferred();
}
@@ -456,14 +483,14 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- JumpTarget true_target(this);
- JumpTarget false_target(this);
+ JumpTarget true_target;
+ JumpTarget false_target;
LoadCondition(x, typeof_state, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
- JumpTarget loaded(this);
- JumpTarget materialize_true(this);
+ JumpTarget loaded;
+ JumpTarget materialize_true;
materialize_true.Branch(cc_reg_);
__ mov(r0, Operand(Factory::false_value()));
frame_->EmitPush(r0);
@@ -478,7 +505,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
if (true_target.is_linked() || false_target.is_linked()) {
// We have at least one condition value that has been "translated"
// into a branch, thus it needs to be loaded explicitly.
- JumpTarget loaded(this);
+ JumpTarget loaded;
if (frame_ != NULL) {
loaded.Jump(); // Don't lose the current TOS.
}
@@ -510,14 +537,14 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(r0, GlobalObject());
frame_->EmitPush(r0);
}
void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(scratch,
FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
@@ -529,7 +556,7 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
// variables w/o reference errors elsewhere.
void CodeGenerator::LoadTypeofExpression(Expression* x) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Variable* variable = x->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
@@ -559,7 +586,7 @@ Reference::~Reference() {
void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
@@ -602,7 +629,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
int size = ref->size();
@@ -619,7 +646,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
JumpTarget* false_target) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
frame_->EmitPop(r0);
@@ -701,7 +728,7 @@ class GenericBinaryOpStub : public CodeStub {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
// result : r0
@@ -756,13 +783,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
class DeferredInlineSmiOperation: public DeferredCode {
public:
- DeferredInlineSmiOperation(CodeGenerator* generator,
- Token::Value op,
+ DeferredInlineSmiOperation(Token::Value op,
int value,
bool reversed,
OverwriteMode overwrite_mode)
- : DeferredCode(generator),
- op_(op),
+ : op_(op),
value_(value),
reversed_(reversed),
overwrite_mode_(overwrite_mode) {
@@ -780,17 +805,13 @@ class DeferredInlineSmiOperation: public DeferredCode {
void DeferredInlineSmiOperation::Generate() {
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
-
switch (op_) {
case Token::ADD: {
+ // Revert optimistic add.
if (reversed_) {
- // revert optimistic add
__ sub(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
- // revert optimistic add
__ sub(r1, r0, Operand(Smi::FromInt(value_)));
__ mov(r0, Operand(Smi::FromInt(value_)));
}
@@ -798,8 +819,8 @@ void DeferredInlineSmiOperation::Generate() {
}
case Token::SUB: {
+ // Revert optimistic sub.
if (reversed_) {
- // revert optimistic sub
__ rsb(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_)));
} else {
@@ -828,24 +849,19 @@ void DeferredInlineSmiOperation::Generate() {
__ mov(r1, Operand(r0));
__ mov(r0, Operand(Smi::FromInt(value_)));
} else {
- UNREACHABLE(); // should have been handled in SmiOperation
+ UNREACHABLE(); // Should have been handled in SmiOperation.
}
break;
}
default:
- // other cases should have been handled before this point.
+ // Other cases should have been handled before this point.
UNREACHABLE();
break;
}
- GenericBinaryOpStub igostub(op_, overwrite_mode_);
- Result arg0 = generator()->allocator()->Allocate(r1);
- ASSERT(arg0.is_valid());
- Result arg1 = generator()->allocator()->Allocate(r0);
- ASSERT(arg1.is_valid());
- generator()->frame()->CallStub(&igostub, &arg0, &arg1);
- exit_.Jump();
+ GenericBinaryOpStub stub(op_, overwrite_mode_);
+ __ CallStub(&stub);
}
@@ -853,7 +869,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a literal smi. With this optimization, the
@@ -865,34 +881,34 @@ void CodeGenerator::SmiOperation(Token::Value op,
int int_value = Smi::cast(*value)->value();
- JumpTarget exit(this);
+ JumpTarget exit;
frame_->EmitPop(r0);
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC);
- deferred->enter()->Branch(vs);
+ deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
deferred->BindExit();
break;
}
case Token::SUB: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
- if (!reversed) {
- __ sub(r0, r0, Operand(value), SetCC);
- } else {
+ if (reversed) {
__ rsb(r0, r0, Operand(value), SetCC);
+ } else {
+ __ sub(r0, r0, Operand(value), SetCC);
}
- deferred->enter()->Branch(vs);
+ deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
deferred->BindExit();
break;
}
@@ -901,9 +917,9 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
switch (op) {
case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
@@ -926,16 +942,16 @@ void CodeGenerator::SmiOperation(Token::Value op,
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
- new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask));
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
switch (op) {
case Token::SHL: {
__ mov(r2, Operand(r2, LSL, shift_value));
// check that the *unsigned* result fits in a smi
__ add(r3, r2, Operand(0x40000000), SetCC);
- deferred->enter()->Branch(mi);
+ deferred->Branch(mi);
break;
}
case Token::SHR: {
@@ -950,7 +966,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ and_(r3, r2, Operand(0xc0000000), SetCC);
- deferred->enter()->Branch(ne);
+ deferred->Branch(ne);
break;
}
case Token::SAR: {
@@ -987,7 +1003,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
void CodeGenerator::Comparison(Condition cc, bool strict) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
// result : cc register
@@ -995,8 +1011,8 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
- JumpTarget exit(this);
- JumpTarget smi(this);
+ JumpTarget exit;
+ JumpTarget smi;
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
@@ -1057,12 +1073,14 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
class CallFunctionStub: public CodeStub {
public:
- explicit CallFunctionStub(int argc) : argc_(argc) {}
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
void Generate(MacroAssembler* masm);
private:
int argc_;
+ InLoopFlag in_loop_;
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
@@ -1070,13 +1088,14 @@ class CallFunctionStub: public CodeStub {
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
};
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
@@ -1087,7 +1106,8 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CodeForSourcePosition(position);
// Use the shared code stub to call the function.
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
@@ -1097,7 +1117,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
@@ -1106,7 +1126,7 @@ void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
void CodeGenerator::CheckStack() {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
StackCheckStub stub;
@@ -1141,7 +1161,7 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
VisitAndSpill(statements->at(i));
}
@@ -1153,10 +1173,10 @@ void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
VisitStatementsAndSpill(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1167,7 +1187,7 @@ void CodeGenerator::VisitBlock(Block* node) {
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ mov(r0, Operand(pairs));
frame_->EmitPush(r0);
frame_->EmitPush(cp);
@@ -1182,7 +1202,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Declaration");
CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
@@ -1254,7 +1274,7 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
@@ -1269,7 +1289,7 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
@@ -1281,7 +1301,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
@@ -1290,11 +1310,11 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
CodeForStatementPosition(node);
- JumpTarget exit(this);
+ JumpTarget exit;
if (has_then_stm && has_else_stm) {
Comment cmnt(masm_, "[ IfThenElse");
- JumpTarget then(this);
- JumpTarget else_(this);
+ JumpTarget then;
+ JumpTarget else_;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
@@ -1318,7 +1338,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_then_stm) {
Comment cmnt(masm_, "[ IfThen");
ASSERT(!has_else_stm);
- JumpTarget then(this);
+ JumpTarget then;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &exit, true);
@@ -1334,7 +1354,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
} else if (has_else_stm) {
Comment cmnt(masm_, "[ IfElse");
ASSERT(!has_then_stm);
- JumpTarget else_(this);
+ JumpTarget else_;
// if (!cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&exit, &else_, true);
@@ -1371,7 +1391,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
node->target()->continue_target()->Jump();
@@ -1379,7 +1399,7 @@ void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
node->target()->break_target()->Jump();
@@ -1387,7 +1407,7 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ReturnStatement");
if (function_return_is_shadowed_) {
@@ -1414,7 +1434,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
LoadAndSpill(node->expression());
@@ -1424,7 +1444,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
frame_->CallRuntime(Runtime::kPushContext, 1);
}
#ifdef DEBUG
- JumpTarget verified_true(this);
+ JumpTarget verified_true;
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
@@ -1440,7 +1460,7 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
@@ -1467,9 +1487,9 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
- VirtualFrame::SpilledScope spilled_scope(this);
- JumpTarget setup_default(this);
- JumpTarget is_smi(this);
+ VirtualFrame::SpilledScope spilled_scope;
+ JumpTarget setup_default;
+ JumpTarget is_smi;
// A non-null default label pointer indicates a default case among
// the case labels. Otherwise we use the break target as a
@@ -1529,8 +1549,6 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
-
- delete start_frame;
}
@@ -1538,10 +1556,10 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
LoadAndSpill(node->tag());
if (TryGenerateFastCaseSwitchStatement(node)) {
@@ -1549,10 +1567,10 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
return;
}
- JumpTarget next_test(this);
- JumpTarget fall_through(this);
- JumpTarget default_entry(this);
- JumpTarget default_exit(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget next_test;
+ JumpTarget fall_through;
+ JumpTarget default_entry;
+ JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
CaseClause* default_clause = NULL;
@@ -1632,10 +1650,10 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ LoopStatement");
CodeForStatementPosition(node);
- node->break_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
@@ -1656,19 +1674,19 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
switch (node->type()) {
case LoopStatement::DO_LOOP: {
- JumpTarget body(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
// Label the top of the loop for the backward CFG edge. If the test
// is always true we can use the continue target, and if the test is
// always false there is no need.
if (info == ALWAYS_TRUE) {
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else if (info == ALWAYS_FALSE) {
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
} else {
ASSERT(info == DONT_KNOW);
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
body.Bind();
}
@@ -1715,11 +1733,11 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// Label the top of the loop with the continue target for the backward
// CFG edge.
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
if (info == DONT_KNOW) {
- JumpTarget body(this);
+ JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
if (has_valid_frame()) {
@@ -1745,7 +1763,7 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
}
case LoopStatement::FOR_LOOP: {
- JumpTarget loop(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
if (node->init() != NULL) {
VisitAndSpill(node->init());
@@ -1757,16 +1775,16 @@ void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
if (node->next() == NULL) {
- node->continue_target()->Initialize(this, JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->Bind();
} else {
- node->continue_target()->Initialize(this);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
loop.Bind();
}
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
- JumpTarget body(this);
+ JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
if (has_valid_frame()) {
@@ -1822,16 +1840,16 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
- JumpTarget primitive(this);
- JumpTarget jsobject(this);
- JumpTarget fixed_array(this);
- JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
- JumpTarget end_del_check(this);
- JumpTarget exit(this);
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
// Get the object to enumerate over (converted to JSObject).
LoadAndSpill(node->enumerable());
@@ -1916,8 +1934,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// sp[4] : enumerable
// Grab the current frame's height for the break and continue
// targets only after all the state is pushed on the frame.
- node->break_target()->Initialize(this);
- node->continue_target()->Initialize(this);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ ldr(r0, frame_->ElementAt(0)); // load the current count
__ ldr(r1, frame_->ElementAt(1)); // load the length
@@ -2016,12 +2034,12 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryCatch");
CodeForStatementPosition(node);
- JumpTarget try_block(this);
- JumpTarget exit(this);
+ JumpTarget try_block;
+ JumpTarget exit;
try_block.Call();
// --- Catch block ---
@@ -2132,7 +2150,6 @@ void CodeGenerator::VisitTryCatch(TryCatch* node) {
}
shadows[i]->other_target()->Jump();
}
- delete shadows[i];
}
exit.Bind();
@@ -2144,7 +2161,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ TryFinally");
CodeForStatementPosition(node);
@@ -2153,8 +2170,8 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// break/continue from within the try block.
enum { FALLING, THROWING, JUMPING };
- JumpTarget try_block(this);
- JumpTarget finally_block(this);
+ JumpTarget try_block;
+ JumpTarget finally_block;
try_block.Call();
@@ -2299,7 +2316,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
JumpTarget* original = shadows[i]->other_target();
__ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
- JumpTarget skip(this);
+ JumpTarget skip;
skip.Branch(ne);
frame_->PrepareForReturn();
original->Jump();
@@ -2308,12 +2325,11 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
original->Branch(eq);
}
}
- delete shadows[i];
}
if (has_valid_frame()) {
// Check if we need to rethrow the exception.
- JumpTarget exit(this);
+ JumpTarget exit;
__ cmp(r2, Operand(Smi::FromInt(THROWING)));
exit.Branch(ne);
@@ -2332,7 +2348,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2344,7 +2360,7 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
@@ -2362,7 +2378,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -2382,7 +2398,7 @@ void CodeGenerator::VisitFunctionBoilerplateLiteral(
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
ASSERT(frame_->height() == original_height + 1);
@@ -2393,11 +2409,11 @@ void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Conditional");
- JumpTarget then(this);
- JumpTarget else_(this);
- JumpTarget exit(this);
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
Branch(false, &else_);
@@ -2412,12 +2428,12 @@ void CodeGenerator::VisitConditional(Conditional* node) {
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
- JumpTarget slow(this);
- JumpTarget done(this);
+ JumpTarget slow;
+ JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
@@ -2565,7 +2581,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state());
ASSERT(frame_->height() == original_height + 1);
@@ -2576,7 +2592,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ VariableProxy");
Variable* var = node->var();
@@ -2596,7 +2612,7 @@ void CodeGenerator::VisitLiteral(Literal* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Literal");
__ mov(r0, Operand(node->handle()));
frame_->EmitPush(r0);
@@ -2608,7 +2624,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ RexExp Literal");
// Retrieve the literal array and check the allocated entry.
@@ -2624,7 +2640,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset));
- JumpTarget done(this);
+ JumpTarget done;
__ cmp(r2, Operand(Factory::undefined_value()));
done.Branch(ne);
@@ -2653,8 +2669,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode {
public:
- DeferredObjectLiteral(CodeGenerator* generator, ObjectLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
set_comment("[ DeferredObjectLiteral");
}
@@ -2667,26 +2682,20 @@ class DeferredObjectLiteral: public DeferredCode {
void DeferredObjectLiteral::Generate() {
// Argument is passed in r1.
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
// If the entry is undefined we call the runtime system to compute
// the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->EmitPush(r1);
+ __ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- frame->EmitPush(r0);
+ __ push(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->constant_properties()));
- frame->EmitPush(r0);
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(r2, Operand(boilerplate.reg()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
// Result is returned in r2.
- exit_.Jump();
}
@@ -2694,10 +2703,10 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literal array and check the allocated entry.
@@ -2715,7 +2724,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value()));
- deferred->enter()->Branch(eq);
+ deferred->Branch(eq);
deferred->BindExit();
// Push the object literal boilerplate.
@@ -2782,8 +2791,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode {
public:
- DeferredArrayLiteral(CodeGenerator* generator, ArrayLiteral* node)
- : DeferredCode(generator), node_(node) {
+ explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
set_comment("[ DeferredArrayLiteral");
}
@@ -2796,26 +2804,20 @@ class DeferredArrayLiteral: public DeferredCode {
void DeferredArrayLiteral::Generate() {
// Argument is passed in r1.
- enter()->Bind();
- VirtualFrame::SpilledScope spilled_scope(generator());
// If the entry is undefined we call the runtime system to compute
// the literal.
-
- VirtualFrame* frame = generator()->frame();
// Literal array (0).
- frame->EmitPush(r1);
+ __ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- frame->EmitPush(r0);
+ __ push(r0);
// Constant elements (2).
__ mov(r0, Operand(node_->literals()));
- frame->EmitPush(r0);
- Result boilerplate =
- frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- __ mov(r2, Operand(boilerplate.reg()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
// Result is returned in r2.
- exit_.Jump();
}
@@ -2823,10 +2825,10 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node);
+ DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literal array and check the allocated entry.
@@ -2844,7 +2846,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Check whether we need to materialize the array literal boilerplate.
// If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value()));
- deferred->enter()->Branch(eq);
+ deferred->Branch(eq);
deferred->BindExit();
// Push the array literal boilerplate.
@@ -2897,7 +2899,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -2914,7 +2916,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
CodeForStatementPosition(node);
@@ -2982,7 +2984,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Throw");
LoadAndSpill(node->exception());
@@ -2997,7 +2999,7 @@ void CodeGenerator::VisitProperty(Property* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
@@ -3011,7 +3013,7 @@ void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
@@ -3054,7 +3056,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
// Setup the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
arg_count + 1);
@@ -3105,7 +3108,8 @@ void CodeGenerator::VisitCall(Call* node) {
}
// Set the receiver register and call the IC initialization code.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3160,7 +3164,7 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallEval");
// In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
@@ -3203,7 +3207,8 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
// Call the function.
CodeForSourcePosition(node->position());
- CallFunctionStub call_function(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3218,7 +3223,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CallNew");
CodeForStatementPosition(node);
@@ -3268,9 +3273,9 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
- JumpTarget leave(this);
+ JumpTarget leave;
LoadAndSpill(args->at(0));
frame_->EmitPop(r0); // r0 contains object.
// if (object->IsSmi()) return the object.
@@ -3290,9 +3295,9 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
- JumpTarget leave(this);
+ JumpTarget leave;
LoadAndSpill(args->at(0)); // Load the object.
LoadAndSpill(args->at(1)); // Load the value.
frame_->EmitPop(r0); // r0 contains value
@@ -3318,7 +3323,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
@@ -3328,7 +3333,7 @@ void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
// See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3344,7 +3349,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
@@ -3357,7 +3362,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
__ mov(r0, Operand(Factory::undefined_value()));
frame_->EmitPush(r0);
@@ -3365,10 +3370,10 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
- JumpTarget answer(this);
+ JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
@@ -3387,7 +3392,7 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
// Seed the result with the formal parameters count, which will be used
@@ -3402,7 +3407,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
@@ -3419,7 +3424,7 @@ void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -3436,7 +3441,7 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
if (CheckForInlineRuntimeCall(node)) {
ASSERT((has_cc() && frame_->height() == original_height) ||
(!has_cc() && frame_->height() == original_height + 1));
@@ -3465,7 +3470,8 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
- Handle<Code> stub = ComputeCallInitialize(arg_count);
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->Drop();
@@ -3483,7 +3489,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -3572,8 +3578,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::BIT_NOT: {
// smi check
- JumpTarget smi_label(this);
- JumpTarget continue_label(this);
+ JumpTarget smi_label;
+ JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
smi_label.Branch(eq);
@@ -3599,7 +3605,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::ADD: {
// Smi check.
- JumpTarget continue_label(this);
+ JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
@@ -3624,7 +3630,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -3653,8 +3659,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
frame_->EmitPop(r0);
- JumpTarget slow(this);
- JumpTarget exit(this);
+ JumpTarget slow;
+ JumpTarget exit;
// Load the value (1) into register r1.
__ mov(r1, Operand(Smi::FromInt(1)));
@@ -3726,7 +3732,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3743,7 +3749,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// of compiling the binary operation is materialized or not.
if (op == Token::AND) {
- JumpTarget is_true(this);
+ JumpTarget is_true;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
&is_true,
@@ -3761,8 +3767,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
false);
} else {
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
__ ldr(r0, frame_->Top()); // dup the stack top
frame_->EmitPush(r0);
@@ -3785,7 +3791,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
} else if (op == Token::OR) {
- JumpTarget is_false(this);
+ JumpTarget is_false;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
true_target(),
@@ -3803,8 +3809,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
false);
} else {
- JumpTarget pop_and_continue(this);
- JumpTarget exit(this);
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
@@ -3876,7 +3882,7 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
ASSERT(frame_->height() == original_height + 1);
@@ -3887,7 +3893,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(this);
+ VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
@@ -4245,7 +4251,7 @@ void Reference::SetValue(InitState init_state) {
} else {
ASSERT(!slot->var()->is_dynamic());
- JumpTarget exit(cgen_);
+ JumpTarget exit;
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
@@ -4335,6 +4341,45 @@ void Reference::SetValue(InitState init_state) {
}
+static void AllocateHeapNumber(
+ MacroAssembler* masm,
+ Label* need_gc, // Jump here if young space is full.
+ Register result_reg, // The tagged address of the new heap number.
+ Register allocation_top_addr_reg, // A scratch register.
+ Register scratch2) { // Another scratch register.
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+
+ // allocation_top_addr_reg := the address of the allocation top variable.
+ __ mov(allocation_top_addr_reg, Operand(allocation_top));
+ // result_reg := the old allocation top.
+ __ ldr(result_reg, MemOperand(allocation_top_addr_reg));
+ // scratch2 := the address of the allocation limit.
+ __ mov(scratch2, Operand(allocation_limit));
+ // scratch2 := the allocation limit.
+ __ ldr(scratch2, MemOperand(scratch2));
+ // result_reg := the new allocation top.
+ __ add(result_reg, result_reg, Operand(HeapNumber::kSize));
+ // Compare the new allocation top and the limit.
+ __ cmp(result_reg, Operand(scratch2));
+ // Branch if out of space in young generation.
+ __ b(hi, need_gc);
+ // Store new allocation top.
+ __ str(result_reg, MemOperand(allocation_top_addr_reg));
+ // Tag and adjust back to start of new object.
+ __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
+ // Get heap number map into scratch2.
+ __ mov(scratch2, Operand(Factory::heap_number_map()));
+ // Store heap number map in new object.
+ __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g., due to overflow). We branch into this code (at the not_smi
+// label) if the operands were not both Smi.
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
const Builtins::JavaScript& builtin,
@@ -4342,73 +4387,74 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
int swi_number,
OverwriteMode mode) {
Label slow;
- if (mode == NO_OVERWRITE) {
- __ bind(not_smi);
- }
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
- // Could it be a double-double op? If we already have a place to put
- // the answer then we can do the op and skip the builtin and runtime call.
- if (mode != NO_OVERWRITE) {
- __ bind(not_smi);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &slow); // We can't handle a Smi-double combination yet.
- // Get map of r0 into r2.
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Get type of r0 into r3.
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &slow);
- // Get type of r1 into r3.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Check they are both the same map (heap number map).
- __ cmp(r2, r3);
- __ b(ne, &slow);
- // Both are doubles.
- // Calling convention says that second double is in r2 and r3.
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ bind(not_smi);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ // Get map of r0 into r2.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ // Get type of r0 into r3.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &slow);
+ // Get type of r1 into r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Check they are both the same map (heap number map).
+ __ cmp(r2, r3);
+ __ b(ne, &slow);
+ // Both are doubles.
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+
+ if (mode == NO_OVERWRITE) {
+ // Get address of new heap number into r5.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
__ push(lr);
- if (mode == OVERWRITE_LEFT) {
- __ push(r1);
- } else {
- __ push(r0);
- }
- // Calling convention says that first double is in r0 and r1.
- __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
- __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- // Call C routine that may not cause GC or other trouble.
- __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+ __ push(r5);
+ } else if (mode == OVERWRITE_LEFT) {
+ __ push(lr);
+ __ push(r1);
+ } else {
+ ASSERT(mode == OVERWRITE_RIGHT);
+ __ push(lr);
+ __ push(r0);
+ }
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ // Call C routine that may not cause GC or other trouble.
+ __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
#if !defined(__arm__)
- // Notify the simulator that we are calling an add routine in C.
- __ swi(swi_number);
+ // Notify the simulator that we are calling an add routine in C.
+ __ swi(swi_number);
#else
- // Actually call the add routine written in C.
- __ Call(r5);
+ // Actually call the add routine written in C.
+ __ Call(r5);
#endif
- // Store answer in the overwritable heap number.
- __ pop(r4);
+ // Store answer in the overwritable heap number.
+ __ pop(r4);
#if !defined(__ARM_EABI__) && defined(__arm__)
- // Double returned in fp coprocessor register 0 and 1, encoded as register
- // cr8. Offsets must be divisible by 4 for coprocessor so we need to
- // substract the tag from r4.
- __ sub(r5, r4, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+ // Double returned in fp coprocessor registers 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for the coprocessor, so we
+ // need to subtract the tag from r4.
+ __ sub(r5, r4, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
#else
- // Double returned in fp coprocessor register 0 and 1.
- __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+ // Double returned in core registers r0 and r1.
+ __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
#endif
- __ mov(r0, Operand(r4));
- // And we are done.
- __ pop(pc);
- }
+ __ mov(r0, Operand(r4));
+ // And we are done.
+ __ pop(pc);
}
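
The AllocateHeapNumber helper added above is a classic bump-pointer allocation in the young generation: load the current allocation top, add the object size, compare the new top against the limit, and either publish the new top or branch to the GC path. A minimal standalone C++ sketch of the same scheme (illustrative only, with an invented NewSpace struct, not V8's allocator):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical young-space state: a bump pointer and a hard limit.
    struct NewSpace {
      uintptr_t top;    // Address of the next free byte.
      uintptr_t limit;  // First address past the usable space.
    };

    // Reserve `size` bytes; returns the object's address, or 0 when the
    // space is exhausted (the caller's "need_gc" path).
    uintptr_t BumpAllocate(NewSpace* space, size_t size) {
      uintptr_t new_top = space->top + size;
      if (new_top > space->limit) return 0;  // Out of space: collect first.
      uintptr_t object = space->top;         // Old top becomes the object.
      space->top = new_top;                  // Publish the new top.
      return object;
    }

The assembly version keeps the new top in result_reg and only afterwards subtracts HeapNumber::kSize - kHeapObjectTag, so the value it hands back is a tagged pointer to the start of the new object.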
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index c098acdd7..a8cb777d7 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations
class DeferredCode;
@@ -193,8 +194,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -205,6 +205,8 @@ class CodeGenerator: public AstVisitor {
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
+ // We don't track loop nesting level on ARM yet.
+ int loop_nesting() const { return 0; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
@@ -317,8 +319,7 @@ class CodeGenerator: public AstVisitor {
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc);
- Handle<Code> ComputeCallInitializeInLoop(int argc);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 66c6a8d86..99eab238c 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 736966129..71da1ecc9 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -34,7 +34,8 @@
#include "cpu.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
void CPU::Setup() {
// Nothing to do.
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index f86f981cb..bcfab6c80 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -30,7 +30,8 @@
#include "codegen-inl.h"
#include "debug.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Currently debug break is not supported in frame exit code on ARM.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 3b7474dba..f56a599f8 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -62,7 +62,8 @@
#include "platform.h"
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
namespace v8i = v8::internal;
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index d26198ae1..6fde4b73c 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -31,7 +31,8 @@
#include "arm/assembler-arm-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
StackFrame::Type StackFrame::ComputeType(State* state) {
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 9a18f3d93..a67b18a2b 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// The ARM ABI does not specify the usage of register r9, which may be reserved
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index b07c4742d..9b45c46a8 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -32,7 +32,8 @@
#include "runtime.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// ----------------------------------------------------------------------------
@@ -211,7 +212,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
- Code::ComputeFlags(Code::CALL_IC, MONOMORPHIC, NORMAL, argc);
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
// If the stub cache probing failed, the receiver might be a value.
@@ -422,7 +423,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 0));
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r0, r2, r3);
// Cache miss: Jump to runtime.
@@ -755,7 +758,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
__ ldr(r1, MemOperand(sp));
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
// Cache miss: Jump to runtime.
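
All three IC generators now pass an explicit NOT_IN_LOOP value to Code::ComputeFlags, so the in-loop bit becomes part of the flags word used for stub-cache lookup. The sketch below is only an illustrative model of that kind of bit packing; the field widths and positions are invented, not V8's actual layout:

    // Illustrative flags packing; positions and widths are hypothetical.
    enum Kind { LOAD_IC, STORE_IC, KEYED_LOAD_IC, CALL_IC };
    enum InLoopFlag { NOT_IN_LOOP = 0, IN_LOOP = 1 };
    enum State { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

    static int ComputeFlags(Kind kind, InLoopFlag in_loop, State state) {
      return static_cast<int>(kind)             // bits 0-2: code kind
          | (static_cast<int>(in_loop) << 3)    // bit 3: compiled for a loop
          | (static_cast<int>(state) << 4);     // bits 4-6: IC state
    }

The stub cache then compares a probed code object's flags against the requested flags after masking off the bits that play no part in lookup; that is the kFlagsNotUsedInLookup change in stub-cache-arm.cc further down.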
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
index 6d375e5ce..65e7eafa6 100644
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ b/deps/v8/src/arm/jump-target-arm.cc
@@ -28,46 +28,47 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "jump-target-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
__ jmp(&entry_label_);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
- AddReachingFrame(cgen_->frame());
+ AddReachingFrame(cgen()->frame());
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
if (is_bound()) {
ASSERT(direction_ == BIDIRECTIONAL);
@@ -77,29 +78,29 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// Swap the current frame for a copy (we do the swapping to get
// the off-frame registers off the fall through) to use for the
// branch.
- VirtualFrame* fall_through_frame = cgen_->frame();
+ VirtualFrame* fall_through_frame = cgen()->frame();
VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
- RegisterFile non_frame_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(branch_frame, &non_frame_registers);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
// Check if we can avoid merge code.
- cgen_->frame()->PrepareMergeTo(entry_frame_);
- if (cgen_->frame()->Equals(entry_frame_)) {
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
// Branch right in to the block.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ b(cc, &entry_label_);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
// Check if we can reuse existing merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL &&
- cgen_->frame()->Equals(reaching_frames_[i])) {
+ cgen()->frame()->Equals(reaching_frames_[i])) {
// Branch to the merge code.
- cgen_->DeleteFrame();
+ cgen()->DeleteFrame();
__ b(cc, &merge_labels_[i]);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
return;
}
}
@@ -108,19 +109,20 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// around the merge code on the fall through path.
Label original_fall_through;
__ b(NegateCondition(cc), &original_fall_through);
- cgen_->frame()->MergeTo(entry_frame_);
- cgen_->DeleteFrame();
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
__ b(&entry_label_);
- cgen_->SetFrame(fall_through_frame, &non_frame_registers);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
- AddReachingFrame(new VirtualFrame(cgen_->frame()));
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
__ b(cc, &merge_labels_.last());
- is_linked_ = true;
}
}
@@ -132,70 +134,63 @@ void JumpTarget::Call() {
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
- ASSERT(cgen_ != NULL);
- ASSERT(cgen_->has_valid_frame());
+ ASSERT(cgen()->has_valid_frame());
// There are no non-frame references across the call.
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
- cgen_->frame()->SpillAll();
- VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
-
- is_linked_ = !is_bound_;
}
void JumpTarget::DoBind(int mergable_elements) {
- ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
- ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (direction_ == FORWARD_ONLY) {
// A simple case: no forward jumps and no possible backward jumps.
if (!is_linked()) {
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- ASSERT(cgen_->has_valid_frame());
- VirtualFrame* frame = cgen_->frame();
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ ASSERT(cgen()->has_valid_frame());
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
}
-
- is_bound_ = true;
+ __ bind(&entry_label_);
return;
}
// Another simple case: no fall through, a single forward jump,
// and no possible backward jumps.
- if (!cgen_->has_valid_frame() && reaching_frames_.length() == 1) {
+ if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
// Pick up the only reaching frame, take ownership of it, and
// use it for the block about to be emitted.
VirtualFrame* frame = reaching_frames_[0];
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[0] = NULL;
__ bind(&merge_labels_[0]);
// The stack pointer can be floating above the top of the
// virtual frame before the bind. Afterward, it should not.
- int difference =
- frame->stack_pointer_ - (frame->elements_.length() - 1);
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
if (difference > 0) {
frame->stack_pointer_ -= difference;
__ add(sp, sp, Operand(difference * kPointerSize));
}
-
- is_linked_ = false;
- is_bound_ = true;
+ __ bind(&entry_label_);
return;
}
}
@@ -203,15 +198,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// If there is a current frame, record it as the fall-through. It
// is owned by the reaching frames for now.
bool had_fall_through = false;
- if (cgen_->has_valid_frame()) {
+ if (cgen()->has_valid_frame()) {
had_fall_through = true;
- AddReachingFrame(cgen_->frame());
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
RegisterFile empty;
- cgen_->SetFrame(NULL, &empty);
+ cgen()->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
- ComputeEntryFrame(mergable_elements);
+ if (entry_frame_ == NULL) {
+ ComputeEntryFrame(mergable_elements);
+ }
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
@@ -242,17 +239,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// binding site or as the fall through from a previous merge
// code block. Jump around the code we are about to
// generate.
- if (cgen_->has_valid_frame()) {
- cgen_->DeleteFrame();
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
__ b(&entry_label_);
}
// Pick up the frame for this block. Assume ownership if
// there cannot be backward jumps.
- RegisterFile reserved = RegisterAllocator::Reserved();
+ RegisterFile empty;
if (direction_ == BIDIRECTIONAL) {
- cgen_->SetFrame(new VirtualFrame(frame), &reserved);
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
} else {
- cgen_->SetFrame(frame, &reserved);
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
__ bind(&merge_labels_[i]);
@@ -261,23 +258,22 @@ void JumpTarget::DoBind(int mergable_elements) {
// looking for any that can share merge code with this one.
for (int j = 0; j < i; j++) {
VirtualFrame* other = reaching_frames_[j];
- if (other != NULL && other->Equals(cgen_->frame())) {
+ if (other != NULL && other->Equals(cgen()->frame())) {
// Set the reaching frame element to null to avoid
// processing it later, and then bind its entry label.
- delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
- cgen_->frame()->MergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
} else if (i == reaching_frames_.length() - 1 && had_fall_through) {
// If this is the fall through, and it didn't need merge
// code, we need to pick up the frame so we can jump around
// subsequent merge blocks if necessary.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(frame, &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
reaching_frames_[i] = NULL;
}
}
@@ -286,22 +282,17 @@ void JumpTarget::DoBind(int mergable_elements) {
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
- if (!cgen_->has_valid_frame()) {
- RegisterFile reserved_registers = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
- // There is certainly a current frame equal to the entry frame.
- // Bind the entry frame label.
- __ bind(&entry_label_);
-
// There may be unprocessed reaching frames that did not need
// merge code. They will have unbound merge labels. Bind their
// merge labels to be the same as the entry label and deallocate
// them.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (!merge_labels_[i].is_bound()) {
- delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
@@ -318,15 +309,13 @@ void JumpTarget::DoBind(int mergable_elements) {
// Use a copy of the reaching frame so the original can be saved
// for possible reuse as a backward merge block.
- RegisterFile reserved = RegisterAllocator::Reserved();
- cgen_->SetFrame(new VirtualFrame(reaching_frames_[0]), &reserved);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
__ bind(&merge_labels_[0]);
- cgen_->frame()->MergeTo(entry_frame_);
- __ bind(&entry_label_);
+ cgen()->frame()->MergeTo(entry_frame_);
}
- is_linked_ = false;
- is_bound_ = true;
+ __ bind(&entry_label_);
}
#undef __
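
The jump target no longer stores a cgen_ back pointer; every use goes through the cgen() accessor, which reads the active code generator from a published "current" instance (virtual-frame-arm.h below spells it out as CodeGeneratorScope::Current()). A minimal sketch of that RAII pattern, with only the names taken from this diff:

    // Minimal "current instance" scope of the kind cgen() relies on.
    class CodeGenerator;  // Defined elsewhere.

    class CodeGeneratorScope {
     public:
      explicit CodeGeneratorScope(CodeGenerator* cgen)
          : previous_(current_) {
        current_ = cgen;  // Publish the new current code generator.
      }
      ~CodeGeneratorScope() { current_ = previous_; }  // Restore on exit.
      static CodeGenerator* Current() { return current_; }

     private:
      CodeGenerator* previous_;
      static CodeGenerator* current_;
    };

    CodeGenerator* CodeGeneratorScope::current_ = NULL;

Nesting scopes therefore behaves like a stack, which is exactly what recursive code generation needs.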
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 365c1ad7f..4e24063c9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -32,7 +32,8 @@
#include "debug.h"
#include "runtime.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Give alias names to registers
Register cp = { 8 }; // JavaScript context pointer
@@ -58,7 +59,10 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// We do not support Thumb interworking with an ARM architecture that does
// not support the blx instruction (below v5t).
#if defined(__THUMB_INTERWORK__)
-#if !defined(__ARM_ARCH_5T__) && !defined(__ARM_ARCH_5TE__)
+#if !defined(__ARM_ARCH_5T__) && \
+ !defined(__ARM_ARCH_5TE__) && \
+ !defined(__ARM_ARCH_7A__) && \
+ !defined(__ARM_ARCH_7__)
// add tests for other versions above v5t as required
#error "for thumb inter-working we require architecture v5t or above"
#endif
@@ -291,6 +295,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // r0 is argc.
+ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ sub(r6, r6, Operand(kPointerSize));
+
// Compute parameter pointer before making changes and save it as ip
// register so that it is restored as sp register on exit, thereby
// popping the args.
@@ -298,6 +308,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// ip = sp + kPointerSize * #args;
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+ // Align the stack at this point. After this point we have 5 pushes,
+ // so in fact we have to unalign here! See also the assert on the
+ // alignment immediately below.
+ if (OS::ActivationFrameAlignment() != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(OS::ActivationFrameAlignment() == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(OS::ActivationFrameAlignment() - 1));
+ push(r7, eq); // Conditional push instruction.
+ }
+
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
@@ -316,9 +337,6 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
mov(r4, Operand(r0));
mov(r5, Operand(r1));
- // Compute the argv pointer and keep it in a callee-saved register.
- add(r6, fp, Operand(r4, LSL, kPointerSizeLog2));
- add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
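
The alignment dance in EnterExitFrame deserves a second look: when the platform wants 8-byte stack alignment, the code pushes one filler word precisely when sp is currently aligned, because the odd number of word-sized pushes that follow would otherwise leave sp misaligned. A standalone model of the decision (names invented):

    #include <cstdint>

    // Returns true when one 4-byte filler word must be pushed now so
    // that sp ends up 8-byte aligned after `pushes` more 4-byte pushes.
    bool NeedsFillerWord(uintptr_t sp, int pushes) {
      uintptr_t after = sp - 4u * static_cast<uintptr_t>(pushes);
      return (after & 7u) != 0;  // Misaligned: pad with one word first.
    }

With the five pushes mentioned in the comment, NeedsFillerWord returns true exactly when sp is aligned on entry, matching the tst/conditional-push pair above.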
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index e336757e0..27eeab2e9 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -30,7 +30,8 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Give alias names to registers
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index bf07f0e3d..78ebc7e80 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -30,7 +30,8 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() {
UNIMPLEMENTED();
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index 2f38bb73e..de5518379 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -28,7 +28,8 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
public:
diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h
new file mode 100644
index 000000000..d98818f0f
--- /dev/null
+++ b/deps/v8/src/arm/register-allocator-arm-inl.h
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // r0
+ 1, // r1
+ 2, // r2
+ 3, // r3
+ 4, // r4
+ 5, // r5
+ 6, // r6
+ 7, // r7
+ -1, // cp
+ 8, // r9
+ 9, // r10
+ -1, // fp
+ 10, // ip
+ -1, // sp
+ 11, // lr
+ -1 // pc
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] =
+ { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+ return registers[num];
+}
+
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved r1 and lr registers are live on JS function entry.
+ Use(r1); // JS function.
+ Use(lr); // Return address.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
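
The ToNumber and ToRegister tables in the new header must be mutual inverses over the twelve allocatable registers, with cp (code 8), fp (11), sp (13) and pc (15) excluded as reserved. A standalone consistency check with the register codes written as plain integers (an illustrative test, not part of the commit):

    #include <cassert>

    // Register code -> allocator number (-1 marks a reserved register).
    static const int kToNumber[16] = {0, 1, 2, 3, 4, 5, 6, 7,
                                      -1, 8, 9, -1, 10, -1, 11, -1};
    // Allocator number -> register code (r0-r7, r9, r10, ip, lr).
    static const int kToCode[12] = {0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 14};

    int main() {
      for (int num = 0; num < 12; num++) {
        assert(kToNumber[kToCode[num]] == num);  // Tables invert each other.
      }
      return 0;
    }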
diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/arm/register-allocator-arm.cc
index d468c84e3..ad0c7f9d4 100644
--- a/deps/v8/src/arm/register-allocator-arm.cc
+++ b/deps/v8/src/arm/register-allocator-arm.cc
@@ -30,7 +30,8 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
@@ -48,56 +49,10 @@ void Result::ToRegister(Register target) {
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
-RegisterFile RegisterAllocator::Reserved() {
- RegisterFile reserved;
- reserved.Use(sp);
- reserved.Use(fp);
- reserved.Use(cp);
- reserved.Use(pc);
- return reserved;
-}
-
-
-void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
- register_file->ref_counts_[sp.code()] = 0;
- register_file->ref_counts_[fp.code()] = 0;
- register_file->ref_counts_[cp.code()] = 0;
- register_file->ref_counts_[pc.code()] = 0;
-}
-
-
-bool RegisterAllocator::IsReserved(int reg_code) {
- return (reg_code == sp.code())
- || (reg_code == fp.code())
- || (reg_code == cp.code())
- || (reg_code == pc.code());
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
- // The following registers are live on function entry, saved in the
- // frame, and available for allocation during execution.
- Use(r1); // JS function.
- Use(lr); // Return address.
-}
-
-
-void RegisterAllocator::Reset() {
- registers_.Reset();
- // The following registers are live on function entry and reserved
- // during execution.
- Use(sp); // Stack pointer.
- Use(fp); // Frame pointer (caller's frame pointer on entry).
- Use(cp); // Context context (callee's context on entry).
- Use(pc); // Program counter.
-}
-
-
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- UNIMPLEMENTED();
- Result invalid(cgen_);
- return invalid;
+ // No byte registers on ARM.
+ UNREACHABLE();
+ return Result();
}
diff --git a/deps/v8/src/arm/register-allocator-arm.h b/deps/v8/src/arm/register-allocator-arm.h
new file mode 100644
index 000000000..f953ed9f1
--- /dev/null
+++ b/deps/v8/src/arm/register-allocator-arm.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 12;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 9737e9539..b8b66636c 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -36,7 +36,8 @@
#if !defined(__arm__)
// Only build the simulator if not compiling for real ARM hardware.
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
using ::v8::internal::Object;
using ::v8::internal::PrintF;
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 2029fd3bc..d4a395aca 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -66,7 +66,8 @@
#include "constants-arm.h"
-namespace assembler { namespace arm {
+namespace assembler {
+namespace arm {
class Simulator {
public:
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 56afa0288..c09f9e3b6 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -31,7 +31,8 @@
#include "codegen-inl.h"
#include "stub-cache.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
#define __ ACCESS_MASM(masm)
@@ -61,7 +62,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the flags match what we're looking for.
__ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
- __ and_(offset, offset, Operand(~Code::kFlagsTypeMask));
+ __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
__ cmp(offset, Operand(flags));
__ b(ne, &miss);
@@ -245,6 +246,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
+ Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
@@ -262,11 +264,13 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name); // name
+ __ mov(scratch1, Operand(lookup_hint));
+ __ push(scratch1);
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 3);
+ __ TailCallRuntime(load_ic_property, 4);
}
@@ -494,7 +498,9 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
- String* name) {
+ String* name,
+ Code::Flags flags) {
+ ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -538,14 +544,16 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(FIELD, name);
+ return GetCodeWithFlags(flags, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
- CheckType check) {
+ CheckType check,
+ Code::Flags flags) {
+ ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -663,7 +671,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
- return GetCode(CONSTANT_FUNCTION, function_name);
+ return GetCodeWithFlags(flags, function_name);
}
@@ -904,7 +912,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
- GenerateLoadInterceptor(masm(), object, holder, r0, r2, r3, r1, &miss);
+ GenerateLoadInterceptor(masm(),
+ object,
+ holder,
+ holder->InterceptorPropertyLookupHint(name),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1010,7 +1026,15 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadInterceptor(masm(), receiver, holder, r0, r2, r3, r1, &miss);
+ GenerateLoadInterceptor(masm(),
+ receiver,
+ holder,
+ Smi::FromInt(JSObject::kLookupInHolder),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
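
Both interceptor stubs now hand the lookup hint to the runtime as a Smi, which lets a small integer travel through an argument slot that otherwise carries tagged pointers. On 32-bit V8 of this era the encoding is a one-bit shift with tag bit 0; a standalone model:

    #include <cstdint>

    const int kSmiTagSize = 1;  // Low bit is the tag; 0 means Smi.

    intptr_t SmiFromInt(int value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }

    int IntFromSmi(intptr_t smi) {
      return static_cast<int>(smi >> kSmiTagSize);
    }

The same property is what the tst-against-kSmiTagMask checks in HandleBinaryOpSlowCases exploit: a clear low bit means Smi, a set one means heap pointer.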
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 43100f1ec..952738329 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -31,31 +31,25 @@
#include "register-allocator-inl.h"
#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
-VirtualFrame::VirtualFrame(CodeGenerator* cgen)
- : cgen_(cgen),
- masm_(cgen->masm()),
- elements_(cgen->scope()->num_parameters()
- + cgen->scope()->num_stack_slots()
- + kPreallocatedElements),
- parameter_count_(cgen->scope()->num_parameters()),
- local_count_(0),
- stack_pointer_(parameter_count_), // 0-based index of TOS.
- frame_pointer_(kIllegalIndex) {
- for (int i = 0; i < parameter_count_ + 1; i++) {
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count()) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
- for (int i = 0; i < kNumRegisters; i++) {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
@@ -82,10 +76,10 @@ void VirtualFrame::SyncRange(int begin, int end) {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
- Comment cmnt(masm_, "[ Merge frame");
+ Comment cmnt(masm(), "[ Merge frame");
// We should always be merging the code generator's current frame to an
// expected frame.
- ASSERT(cgen_->frame() == this);
+ ASSERT(cgen()->frame() == this);
// Adjust the stack pointer upward (toward the top of the virtual
// frame) if necessary.
@@ -102,7 +96,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
// Fix any sync bit problems from the bottom-up, stopping when we
// hit the stack pointer or the top of the frame if the stack
// pointer is floating above the frame.
- int limit = Min(stack_pointer_, elements_.length() - 1);
+ int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = 0; i <= limit; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
@@ -134,7 +128,7 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
// On ARM, all elements are in memory.
#ifdef DEBUG
- int start = Min(stack_pointer_, elements_.length() - 1);
+ int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
for (int i = start; i >= 0; i--) {
ASSERT(elements_[i].is_memory());
ASSERT(expected->elements_[i].is_memory());
@@ -147,12 +141,12 @@ void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
}
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
}
void VirtualFrame::Enter() {
- Comment cmnt(masm_, "[ Enter JS frame");
+ Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
@@ -175,15 +169,14 @@ void VirtualFrame::Enter() {
Adjust(4);
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
- frame_pointer_ = elements_.length() - 2;
__ add(fp, sp, Operand(2 * kPointerSize));
- cgen_->allocator()->Unuse(r1);
- cgen_->allocator()->Unuse(lr);
+ cgen()->allocator()->Unuse(r1);
+ cgen()->allocator()->Unuse(lr);
}
void VirtualFrame::Exit() {
- Comment cmnt(masm_, "[ Exit JS frame");
+ Comment cmnt(masm(), "[ Exit JS frame");
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
@@ -191,12 +184,11 @@ void VirtualFrame::Exit() {
}
-void VirtualFrame::AllocateStackSlots(int count) {
- ASSERT(height() == 0);
- local_count_ = count;
- Adjust(count);
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
if (count > 0) {
- Comment cmnt(masm_, "[ Allocate space for locals");
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ Adjust(count);
// Initialize stack slots with 'undefined' value.
__ mov(ip, Operand(Factory::undefined_value()));
for (int i = 0; i < count; i++) {
@@ -246,9 +238,9 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
Result VirtualFrame::RawCallStub(CodeStub* stub) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallStub(stub);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -271,9 +263,9 @@ Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -281,9 +273,9 @@ Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -297,16 +289,16 @@ Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
PrepareForCall(arg_count, arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
return result;
}
Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
- ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
- Result result = cgen_->allocator()->Allocate(r0);
+ Result result = cgen()->allocator()->Allocate(r0);
ASSERT(result.is_valid());
return result;
}
@@ -401,7 +393,7 @@ Result VirtualFrame::CallCodeObject(Handle<Code> code,
void VirtualFrame::Drop(int count) {
ASSERT(height() >= count);
- int num_virtual_elements = (elements_.length() - 1) - stack_pointer_;
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
@@ -422,13 +414,12 @@ void VirtualFrame::Drop(int count) {
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
- Result invalid(cgen_);
- return invalid;
+ return Result();
}
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
@@ -436,7 +427,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == elements_.length() - 1);
+ ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
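
VirtualFrame::Drop above separates elements that exist only in the virtual frame (above the hardware stack pointer) from those physically on the stack; only the latter require moving sp. The same arithmetic in standalone form (an illustrative struct, not the real class):

    // Words the hardware sp must pop to drop `count` frame elements.
    struct Frame {
      int element_count;  // Total elements in the virtual frame.
      int stack_pointer;  // Index of the element sp points at.
    };

    int WordsToPop(const Frame& f, int count) {
      int virtual_elements = (f.element_count - 1) - f.stack_pointer;
      return count > virtual_elements ? count - virtual_elements : 0;
    }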
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 371a23e93..ebebd534a 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -29,8 +29,10 @@
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
+#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
@@ -41,7 +43,7 @@ namespace v8 { namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -50,42 +52,66 @@ class VirtualFrame : public Malloced {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- explicit SpilledScope(CodeGenerator* cgen);
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
- ~SpilledScope();
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
private:
- CodeGenerator* cgen_;
bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- explicit VirtualFrame(CodeGenerator* cgen);
+ VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
- int height() const {
- return elements_.length() - expression_base_index();
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
+ }
+
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -95,7 +121,12 @@ class VirtualFrame : public Malloced {
// Forget elements from the top of the frame to match an actual frame (e.g.,
// the frame after a runtime call). No code is emitted.
// the frame after a runtime call). No code is emitted.
- void Forget(int count);
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
// Forget count elements from the top of the frame without adjusting
// the stack pointer downward. This is used, for example, before
@@ -106,7 +137,9 @@ class VirtualFrame : public Malloced {
void SpillAll();
// Spill all occurrences of a specific register from the frame.
- void Spill(Register reg);
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
@@ -127,13 +160,23 @@ class VirtualFrame : public Malloced {
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
- void DetachFromCodeGenerator();
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
- void AttachToCodeGenerator();
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
@@ -149,13 +192,13 @@ class VirtualFrame : public Malloced {
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots(int count);
+ void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
- MemOperand Top() const { return MemOperand(sp, 0); }
+ MemOperand Top() { return MemOperand(sp, 0); }
// An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) const {
+ MemOperand ElementAt(int index) {
return MemOperand(sp, index * kPointerSize);
}
@@ -165,18 +208,18 @@ class VirtualFrame : public Malloced {
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
- Result temp(value, cgen_);
+ Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) const {
+ MemOperand LocalAt(int index) {
ASSERT(0 <= index);
- ASSERT(index < local_count_);
+ ASSERT(index < local_count());
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
@@ -202,13 +245,13 @@ class VirtualFrame : public Malloced {
void PushReceiverSlotAddress();
// The function frame slot.
- MemOperand Function() const { return MemOperand(fp, kFunctionOffset); }
+ MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); }
// The context frame slot.
- MemOperand Context() const { return MemOperand(fp, kContextOffset); }
+ MemOperand Context() { return MemOperand(fp, kContextOffset); }
// Save the value of the cp register to the context frame slot.
void SaveContextRegister();
@@ -218,10 +261,11 @@ class VirtualFrame : public Malloced {
void RestoreContextRegister();
// A parameter as an assembly operand.
- MemOperand ParameterAt(int index) const {
+ MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
- ASSERT(-1 <= index && index <= parameter_count_);
- return MemOperand(fp, (1 + parameter_count_ - index) * kPointerSize);
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index <= parameter_count());
+ return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
@@ -243,14 +287,17 @@ class VirtualFrame : public Malloced {
}
// The receiver frame slot.
- MemOperand Receiver() const { return ParameterAt(-1); }
+ MemOperand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count);
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
// Call stub that expects its argument in r0. The argument is given
// as a result which must be the register r0.
@@ -297,7 +344,7 @@ class VirtualFrame : public Malloced {
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -317,7 +364,15 @@ class VirtualFrame : public Malloced {
void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame).
- void Push(Result* result);
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
@@ -332,70 +387,69 @@ class VirtualFrame : public Malloced {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- CodeGenerator* cgen_;
- MacroAssembler* masm_;
-
- List<FrameElement> elements_;
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count_;
- int local_count_;
+ ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the sp register).
int stack_pointer_;
- // The index of the element that is at the processor's frame pointer
- // (the fp register).
- int frame_pointer_;
-
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the fp register). The parameters, receiver, function, and context
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 3; }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
- // The index of the context slot in the frame.
- int context_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ - 1;
- }
+ // The index of the context slot in the frame. It is immediately
+ // below the frame pointer.
+ int context_index() { return frame_pointer() - 1; }
- // The index of the function slot in the frame. It lies above the context
- // slot.
- int function_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ - 2;
- }
+ // The index of the function slot in the frame. It is below the frame
+ // pointer and context slot.
+ int function_index() { return frame_pointer() - 2; }
- // The index of the first local. Between the parameters and the locals
- // lie the return address, the saved frame pointer, the context, and the
- // function.
- int local0_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 2;
- }
+ // The index of the first local. Between the frame pointer and the
+ // locals lies the return address.
+ int local0_index() { return frame_pointer() + 2; }
// The index of the base of the expression stack.
- int expression_base_index() const { return local0_index() + local_count_; }
+ int expression_base_index() { return local0_index() + local_count(); }
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) const {
- return (frame_pointer_ - index) * kPointerSize;
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
- void Use(Register reg, int index);
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
- void Unuse(Register reg);
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
@@ -407,7 +461,7 @@ class VirtualFrame : public Malloced {
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
+ // Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
// Sync a single unsynced element that lies beneath or at the stack pointer.
@@ -471,6 +525,8 @@ class VirtualFrame : public Malloced {
bool Equals(VirtualFrame* other);
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
friend class JumpTarget;
};