author     Ryan Dahl <ry@tinyclouds.org>  2011-02-24 13:14:59 -0800
committer  Ryan Dahl <ry@tinyclouds.org>  2011-02-24 13:14:59 -0800
commit     e33e7d1a3712add02cd08c0069bb56ba76e49aa5
tree       3cd8937acf1e3b27a265e42bf3d42241d51f6c1a /deps/v8/src/arm
parent     2680522d3a71fb5f12aa052555f3d04806338821
download   node-new-e33e7d1a3712add02cd08c0069bb56ba76e49aa5.tar.gz

Upgrade V8 to 3.1.6
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc             110
-rw-r--r--  deps/v8/src/arm/assembler-arm.h               28
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc            145
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h              26
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                 4
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc                     5
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc            14
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc          192
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                57
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                 81
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc       473
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h         77
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc  303
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.h    84
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc        42
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h         24
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc             15
17 files changed, 1121 insertions, 559 deletions
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index fb9bb488c9..c91d4ba2bc 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
+
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vldr(const DwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
dst.split_code(&sd, &d);
+ ASSERT(offset >= 0);
+
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vldr(const SwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
src.split_code(&sd, &d);
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
+ ASSERT(offset >= 0);
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vstr(const SwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
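
The four vldr/vstr hunks above share one idea: the VFP load/store immediate is an unsigned 8-bit field scaled by 4, so any offset that is word-aligned and below 1024 bytes still encodes in a single instruction, and everything else falls back to materializing the address in ip. Below is a standalone sketch of that encodability test (plain C++, not V8 code; the 256 mirrors the 8-bit field).

// Sketch, not part of the diff: models the check used above to decide
// between the single-instruction VFP encoding and the ip fallback.
#include <cstdio>
#include <cstdlib>

// True if `offset` fits the vldr/vstr immediate: a multiple of 4 whose
// word count fits in the 8-bit, unsigned, word-scaled field.
static bool FitsVfpImmediate(int offset) {
  offset = std::abs(offset);              // the sign lives in the U bit
  return (offset % 4) == 0 && (offset / 4) < 256;
}

int main() {
  std::printf("%d\n", FitsVfpImmediate(1020));   // 1: 255 words, encodable
  std::printf("%d\n", FitsVfpImmediate(1024));   // 0: compute address in ip
  std::printf("%d\n", FitsVfpImmediate(-6));     // 0: not word-aligned
  return 0;
}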
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 3941c84b34..954b9cff33 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // Return true of this operand fits in one instruction so that no
+ // Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
bool must_use_constant_pool() const;
@@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED {
offset_ = offset;
}
- uint32_t offset() {
+ uint32_t offset() const {
ASSERT(rm_.is(no_reg));
return offset_;
}
@@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ bool OffsetIsUint12Encodable() const {
+ return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
+ }
+
private:
Register rn_; // base
Register rm_; // register offset
@@ -902,22 +906,34 @@ class Assembler : public Malloced {
void vldr(const DwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const DwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const SwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const SwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
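
The header now pairs each register+offset form with a MemOperand overload and adds MemOperand::OffsetIsUint12Encodable(), which mirrors the 12-bit immediate of the core ldr/str encodings (the add/subtract direction is a separate bit, so only the magnitude has to fit). A standalone sketch of that predicate, assuming the usual is_uint12 semantics:

// Sketch, not part of the diff: the uint12 test behind OffsetIsUint12Encodable.
#include <cstdint>
#include <cstdio>

static bool IsUint12(int64_t x) { return 0 <= x && x < (1 << 12); }

// Only the magnitude of the offset must fit in 12 bits; the direction
// (add or subtract from the base) is encoded separately.
static bool OffsetIsUint12Encodable(int32_t offset) {
  return offset >= 0 ? IsUint12(offset) : IsUint12(-offset);
}

int main() {
  std::printf("%d %d %d\n",
              OffsetIsUint12Encodable(4095),    // 1
              OffsetIsUint12Encodable(-4095),   // 1
              OffsetIsUint12Encodable(4096));   // 0
  return 0;
}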
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index cc49f7e4e5..87fa87df0c 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
Register result = r5;
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
@@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
- __ AllocateHeapNumber(
- r5, scratch1, scratch2, heap_number_map, gc_required);
+ Register result = r5;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
// r2: Answer as signed int32.
// r5: Heap number to write answer into.
@@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
+ Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
- // Try to add strings before calling runtime.
+ __ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateAddStrings(masm);
}
- GenericBinaryOpStub stub(op_, mode_, r1, r0);
- __ TailCallStub(&stub);
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
Register left = r1;
Register right = r0;
- Label call_runtime;
- // Check if first argument is a string.
- __ JumpIfSmi(left, &call_runtime);
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
+ __ b(ge, &left_not_string);
- // First argument is a a string, test second.
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime);
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
+ __ TailCallStub(&string_add_right_stub);
// At least one argument is not a string.
__ bind(&call_runtime);
@@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// The offset was stored in r4 safepoint slot.
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+ __ LoadFromSafepointRegisterSlot(scratch, r4);
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
@@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
// Stack on entry:
- // sp[0]: second argument.
- // sp[4]: first argument.
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
// Load the two arguments.
__ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- STATIC_ASSERT(kSmiTag == 0);
+ if (flags_ == NO_STRING_ADD_FLAGS) {
__ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
}
// Both arguments are strings.
// r0: first string
// r1: second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
{
Label strings_not_empty;
// Check if either of the strings are empty. In that case return the other.
@@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
@@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ascii strings.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ascii the result is an ascii cons string.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
__ bind(&string_add_flat_result);
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_JS);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
+ __ b(lt, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ str(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ CompareObjectType(
+ arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
+ __ b(ne, slow);
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ and_(scratch2,
+ scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ cmp(scratch2,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ b(ne, slow);
+ __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ str(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index baaa2f2bda..475fbd70e8 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
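
With the flags stored directly, MinorKey() is just the flag bits, so each combination of omitted checks gets its own compiled StringAddStub. A standalone sketch of the bit layout and the resulting minor keys (plain C++ mirroring the enum above):

// Sketch, not part of the diff: the StringAddFlags bit layout and the
// resulting MinorKey values.
#include <cstdio>

enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,    // left is known to be a string
  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,   // right is known to be a string
  NO_STRING_CHECK_IN_STUB =
      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};

int main() {
  // MinorKey() now returns flags_, so the four variants map to 0, 1, 2, 3.
  std::printf("%d %d %d %d\n",
              NO_STRING_ADD_FLAGS,              // 0: check both operands
              NO_STRING_CHECK_LEFT_IN_STUB,     // 1: only check the right one
              NO_STRING_CHECK_RIGHT_IN_STUB,    // 2: only check the left one
              NO_STRING_CHECK_IN_STUB);         // 3: check neither
  return 0;
}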
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index a3921d8efc..3e125a33fc 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 507954d9e1..51c84b3354 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -50,6 +50,11 @@ void CPU::Setup() {
void CPU::FlushICache(void* start, size_t size) {
+ // Nothing to do flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
#if defined (USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9af7a8d190..9a5aa902b8 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
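
The deoptimizer change above makes the source of the context slot explicit: the bottommost output frame copies it from the matching input-frame slot, while non-bottommost (inlined) frames take it from the function. A small standalone model of that selection; the function names and slot values here are illustrative, not the V8 API:

// Sketch, not part of the diff: which value lands in the output frame's
// context slot.
#include <cstdint>
#include <cstdio>

static intptr_t ContextForOutputFrame(bool is_bottommost,
                                      intptr_t input_frame_slot,
                                      intptr_t function_context) {
  // Bottommost frame: reuse whatever the input frame recorded.
  // Inlined frames: the function's context suffices, as long as functions
  // that need local contexts are never inlined.
  return is_bottommost ? input_frame_slot : function_context;
}

int main() {
  std::printf("%#lx\n", (unsigned long)ContextForOutputFrame(true, 0x1000, 0x2000));   // 0x1000
  std::printf("%#lx\n", (unsigned long)ContextForOutputFrame(false, 0x1000, 0x2000));  // 0x2000
  return 0;
}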
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index f04a00e052..fea9a8cfb7 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, r3, r1, r2);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- // Check the stack for overflow or break request.
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
@@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
@@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- Register current = cp;
+ Register context = cp;
Register next = r3;
Register temp = r4;
@@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
+ __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
// Walk the rest of the chain without clobbering cp.
- current = next;
+ context = next;
}
}
// Check that last extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
- __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
@@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &skip);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT: {
+ __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ ldr(r2, ContextOperand(r1, slot->index()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(ne, &skip);
+ __ str(r0, ContextOperand(r1, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r3, r0); // Preserve the stored value in r0.
+ __ RecordWrite(r1, Operand(offset), r3, r2);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(r0);
+ __ mov(r0, Operand(slot->var()->name()));
+ __ Push(cp, r0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &done);
- }
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r2, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &done);
- }
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
@@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(r0); // Value.
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
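
The INIT_CONST path added above relies on const slots holding the hole value until their first initialization: the store only happens while the hole is still there, so re-entering the declaration cannot overwrite an already-initialized const. A standalone model of that check (the sentinel stands in for the hole root value):

// Sketch, not part of the diff: the hole check behind Token::INIT_CONST.
#include <cstdio>

static const int kTheHole = -1;   // stand-in for the hole value

// Writes `value` into `slot` only if the const has not been initialized yet.
static void InitConstSlot(int* slot, int value) {
  if (*slot == kTheHole) *slot = value;   // first initialization wins
}

int main() {
  int slot = kTheHole;
  InitConstSlot(&slot, 42);   // initializes: slot becomes 42
  InitConstSlot(&slot, 99);   // re-entering the declaration is a no-op
  std::printf("%d\n", slot);  // prints 42
  return 0;
}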
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 903f77bbf0..d3c9fee8e3 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
- Abort("MathRound LUnaryMathOperation not implemented");
- return NULL;
+ return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
@@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- Abort("LPower instruction not implemented on ARM");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), d2) :
+ UseFixed(instr->right(), r0);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context = UseTempRegister(instr->context());
+ LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
+ context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
@@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ Abort("DoStorePixelArrayElement not implemented");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
@@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object (we bail out in all other
- // cases).
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
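
The new LPower lowering pins its operands so the C call works out: the double base goes to d1, a double exponent to d2, an int32 or tagged exponent to r0, and the result comes back in d3; a tagged exponent is converted to a double first and then uses the double-double entry. The two C entries it targets are power_double_double_function and power_double_int_function; a standalone model of that split, with std::pow standing in for both:

// Sketch, not part of the diff: the two C entry points LPower dispatches to.
#include <cmath>
#include <cstdio>

// Stand-in for ExternalReference::power_double_double_function().
static double PowerDoubleDouble(double base, double exponent) {
  return std::pow(base, exponent);
}

// Stand-in for ExternalReference::power_double_int_function().
static double PowerDoubleInt(double base, int exponent) {
  return std::pow(base, static_cast<double>(exponent));
}

int main() {
  std::printf("%g\n", PowerDoubleDouble(2.0, 0.5));  // double exponent path (d2)
  std::printf("%g\n", PowerDoubleInt(2.0, 10));      // int32 exponent path (r0)
  return 0;
}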
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 57338f16d5..77d6b71a93 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -135,6 +133,7 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
+ V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1510,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
+ LStoreNamedField(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreNamedField: public LStoreNamed {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+ Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1543,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed {
};
-class LStoreNamedGeneric: public LStoreNamed {
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-};
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-class LStoreKeyedGeneric: public LStoreKeyed {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 1bfb3ad943..ca64442270 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator {
};
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
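
When the prologue above copies a context-allocated parameter into the freshly allocated context, it loads it from the caller's side of the frame at kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize. A standalone sketch of just that offset arithmetic; kPointerSize = 4 and kCallerSPOffset = 8 are illustrative 32-bit ARM assumptions:

// Sketch, not part of the diff: the caller-frame offset used when copying
// parameter i into the newly allocated context.
#include <cstdio>

static const int kPointerSize = 4;                    // assumption: 32-bit ARM
static const int kCallerSPOffset = 2 * kPointerSize;  // assumption: saved fp + lr

static int ParameterOffset(int num_parameters, int i) {
  // The index is reversed: the first parameter sits deepest in the caller's
  // pushed arguments, so it gets the largest positive fp offset.
  return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
}

int main() {
  for (int i = 0; i < 3; ++i) {
    std::printf("parameter %d at fp+%d\n", i, ParameterOffset(3, i));
  }
  return 0;
}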
@@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) {
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- // TODO(regis): Revisit.
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
@@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
}
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
+
+
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
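
The new ToHighMemOperand addresses the second word of a double stack slot, one word past the slot's low word, and the formula differs for spill slots (index >= 0, below fp) and incoming parameters (index < 0, above fp). A standalone rendering of the added arithmetic, assuming kPointerSize = 4:

// Sketch, not part of the diff: fp-relative offsets produced by the new
// ToHighMemOperand for a double stack slot with the given index.
#include <cstdio>

static const int kPointerSize = 4;   // assumption: 32-bit ARM

static int HighWordOffset(int index) {
  if (index >= 0) {
    // Local or spill slot: skip fp, function and context, then step one
    // word back up to reach the high half of the double.
    return -(index + 3) * kPointerSize + kPointerSize;
  }
  // Incoming parameter: skip the return address, then one more word.
  return -(index - 1) * kPointerSize + kPointerSize;
}

int main() {
  std::printf("spill slot 0 high word at fp%+d\n", HighWordOffset(0));       // fp-8
  std::printf("parameter slot -1 high word at fp%+d\n", HighWordOffset(-1)); // fp+12
  return 0;
}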
@@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // d0 must always be a scratch register.
- DoubleRegister dbl_scratch = d0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register core_scratch = scratch0();
- bool destroys_core_scratch = false;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(dbl_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
- if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), core_scratch);
- ASSERT(destroys_core_scratch);
- } else if (to->IsStackSlot()) {
- __ str(core_scratch, ToMemOperand(to));
- ASSERT(destroys_core_scratch);
- } else if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), dbl_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsConstantOperand()) {
- __ mov(core_scratch, ToOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsStackSlot()) {
- __ ldr(core_scratch, ToMemOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsDoubleRegister()) {
- __ vmov(dbl_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- }
- } else if (from->IsConstantOperand()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ mov(ip, ToOperand(from));
- __ str(ip, ToMemOperand(to));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ str(ToRegister(from), ToMemOperand(to));
- }
- } else if (to->IsRegister()) {
- ASSERT(from->IsStackSlot());
- __ ldr(ToRegister(to), ToMemOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ ldr(ip, ToMemOperand(from));
- __ str(ip, ToMemOperand(to));
- } else if (from->IsDoubleRegister()) {
- if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
- }
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- }
-
- if (destroys_core_scratch) {
- __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
+ resolver_.Resolve(move);
}
@@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) {
DeferredModI(LCodeGen* codegen, LModI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD);
}
private:
LModI* instr_;
@@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&ok);
}
- // Try a few common cases before using the generic stub.
+ // Try a few common cases before using the stub.
Label call_stub;
const int kUnfolds = 3;
// Skip if either side is negative.
@@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(result, scratch, Operand(left));
__ bind(&call_stub);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
@@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
DeferredDivI(LCodeGen* codegen, LDivI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
private:
LDivI* instr_;
@@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
__ b(eq, &done);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new DeferredDivI(this, instr);
@@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) {
template<int T>
-void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op) {
+void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
- GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
+ // Move left to r1 and right to r0 for the stub call.
+ if (left.is(r1)) {
+ __ Move(r0, right);
+ } else if (left.is(r0) && right.is(r1)) {
+ __ Swap(r0, r1, r2);
+ } else if (left.is(r0)) {
+ ASSERT(!right.is(r1));
+ __ mov(r1, r0);
+ __ mov(r0, right);
+ } else {
+ ASSERT(!left.is(r0) && !right.is(r0));
+ __ mov(r0, right);
+ __ mov(r1, left);
+ }
+ TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0);
+ __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
__ PopSafepointRegistersAndDoubles();
}
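
DoDeferredBinaryOpStub now has to hand its operands to TypeRecordingBinaryOpStub in fixed registers, left in r1 and right in r0, without clobbering either value; the only delicate case is the full swap when the allocator picked exactly the opposite assignment. A standalone model of that case analysis, with registers reduced to indices into a small array:

// Sketch, not part of the diff: the register shuffle that puts the left
// operand in r1 and the right operand in r0 before calling the stub.
#include <cassert>
#include <cstdio>

enum { r0 = 0, r1 = 1, kNumRegs = 8 };   // registers modelled as indices

static void Shuffle(int regs[kNumRegs], int left, int right) {
  if (left == r1) {
    regs[r0] = regs[right];                    // left already in place
  } else if (left == r0 && right == r1) {
    int tmp = regs[r0];                        // full swap via a scratch
    regs[r0] = regs[r1];
    regs[r1] = tmp;
  } else if (left == r0) {
    regs[r1] = regs[r0];                       // move left out of r0 first
    regs[r0] = regs[right];
  } else {
    regs[r0] = regs[right];                    // neither operand lives in r0/r1
    regs[r1] = regs[left];
  }
}

int main() {
  int regs[kNumRegs] = {10, 11, 12, 13, 14, 15, 16, 17};
  Shuffle(regs, r0, r1);                       // the swap case
  assert(regs[r1] == 10 && regs[r0] == 11);
  std::printf("r1=%d r0=%d\n", regs[r1], regs[r0]);
  return 0;
}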
@@ -1413,7 +1226,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vmov(r2, r3, right);
__ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
// Move the result in the double result register.
- __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+ __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1431,10 +1244,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
- // GenericBinaryOpStub:
- // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
+ TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2174,7 +1984,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
- __ StoreToSafepointRegisterSlot(temp);
+ __ StoreToSafepointRegisterSlot(temp, temp);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->InstructionsGeneratedSince(&before_push_delta));
@@ -2182,7 +1992,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result);
+ __ StoreToSafepointRegisterSlot(result, result);
__ PopSafepointRegisters();
}
@@ -2302,17 +2112,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ ldr(result, ContextOperand(result, instr->slot_index()));
+ __ ldr(result, ContextOperand(context, instr->slot_index()));
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ ldr(context,
- MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
@@ -2715,7 +2521,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input);
+ __ LoadFromSafepointRegisterSlot(input, input);
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ bind(&allocated);
@@ -2726,7 +2532,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ str(tmp1, masm()->SafepointRegisterSlot(input));
+ __ StoreToSafepointRegisterSlot(tmp1, input);
__ PopSafepointRegisters();
__ bind(&done);
@@ -2843,6 +2649,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = result;
+ EmitVFPTruncate(kRoundToNearest,
+ double_scratch0().low(),
+ input,
+ scratch1,
+ scratch2);
+ DeoptimizeIf(ne, instr->environment());
+ __ vmov(result, double_scratch0().low());
+
+ // Test for -0.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+}
+
+
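The minus-zero test above only matters when the rounded result is exactly zero: a zero result whose input has the sign bit set would have to become -0, which a tagged integer cannot represent, so the code deoptimizes instead. A host-side sketch of the same check (illustration only; the helper name is made up, and nearbyint stands in for the VFP round-to-nearest conversion):

    // Illustration only (not V8 code): nearbyint stands in for the VFP
    // round-to-nearest conversion performed by EmitVFPTruncate above.
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Returns true when the rounded value is zero but the input is negative
    // (or -0), i.e. the result would have to be -0 and a deopt is needed.
    static bool RoundWouldProduceMinusZero(double input, int32_t* result) {
      *result = static_cast<int32_t>(std::nearbyint(input));
      if (*result != 0) return false;
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof bits);  // inspect the sign bit directly
      return (bits >> 63) != 0;
    }

    int main() {
      int32_t r;
      std::printf("%d\n", RoundWouldProduceMinusZero(-0.25, &r));  // 1: deopt
      std::printf("%d\n", RoundWouldProduceMinusZero(0.25, &r));   // 0: fine
    }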
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input));
@@ -2850,6 +2680,64 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoPower(LPower* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ Register scratch = scratch0();
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(4, scratch);
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ vmov(r2, r3, ToDoubleRegister(right));
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ } else if (exponent_type.IsInteger32()) {
+ ASSERT(ToRegister(right).is(r0));
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(4, scratch);
+ __ mov(r2, ToRegister(right));
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+ Register right_reg = ToRegister(right);
+
+ // Check for smi on the right hand side.
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+
+ // Untag smi and convert it to a double.
+ __ SmiUntag(right_reg);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ __ vmov(single_scratch, right_reg);
+ __ vcvt_f64_s32(result_reg, single_scratch);
+ __ jmp(&call);
+
+ // Heap number map check.
+ __ bind(&non_smi);
+ __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+ int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
+ __ add(scratch, right_reg, Operand(value_offset));
+ __ vldr(result_reg, scratch, 0);
+
+ // Prepare arguments and call C function.
+ __ bind(&call);
+ __ PrepareCallCFunction(4, scratch);
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ vmov(r2, r3, result_reg);
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ }
+ // Store the result in the result register.
+ __ GetCFunctionDoubleResult(result_reg);
+}
+
+
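In the tagged case, DoPower first distinguishes a smi from a heap number: a smi exponent is untagged and widened to a double, a heap-number exponent is loaded directly, and both paths end in the same double-by-double C call. A host-side sketch of that branch, under the assumption that the power_double_* helpers behave like the standard pow() (the Power and TaggedExponent names are illustrative):

    // Sketch of the tagged-exponent dispatch, modeled on the host.
    #include <cmath>
    #include <cstdio>
    #include <variant>

    using TaggedExponent = std::variant<int, double>;  // smi or heap number

    double Power(double base, TaggedExponent exponent) {
      if (std::holds_alternative<int>(exponent)) {
        // Smi path: untag and widen to double before the call.
        return std::pow(base, static_cast<double>(std::get<int>(exponent)));
      }
      // Heap-number path: the payload is already a double.
      return std::pow(base, std::get<double>(exponent));
    }

    int main() {
      std::printf("%g %g\n", Power(2.0, 10), Power(2.0, 0.5));  // 1024 1.41421
    }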
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@@ -2858,6 +2746,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathFloor:
DoMathFloor(instr);
break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
case kMathSqrt:
DoMathSqrt(instr);
break;
@@ -3157,8 +3048,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ AbortIfNotSmi(r0);
}
__ SmiUntag(r0);
- MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
- __ str(r0, result_stack_slot);
+ __ StoreToSafepointRegisterSlot(r0, result);
__ PopSafepointRegisters();
}
@@ -3239,9 +3129,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(ip, Operand(0));
- int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
- __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
-
+ __ StoreToSafepointRegisterSlot(ip, reg);
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -3252,7 +3140,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ bind(&done);
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
+ __ StoreToSafepointRegisterSlot(reg, reg);
__ PopSafepointRegisters();
}
@@ -3297,8 +3185,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
- __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
+ __ StoreToSafepointRegisterSlot(r0, reg);
__ PopSafepointRegisters();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 732db44517..2d9c6edcb6 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -29,7 +29,7 @@
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
-
+#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
@@ -39,31 +39,8 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -79,10 +56,35 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int ToInteger32(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -94,8 +96,8 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
template<int T>
- void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op);
+ void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
@@ -136,7 +138,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
@@ -202,24 +203,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
-
- int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
-
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
@@ -229,6 +212,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch1,
Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
+ void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
@@ -237,6 +221,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
new file mode 100644
index 0000000000..1a2326b748
--- /dev/null
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -0,0 +1,303 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-gap-resolver-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = { 9 };
+static const DoubleRegister kSavedDoubleValueRegister = { 0 };
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL) { }
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // When doing a depth-first traversal of moves, a cycle can only be found
+ // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) require us to spill this spilled value to
+ // the stack, to free up the register.
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // destination is saved in a stack-allocated local. Multiple moves can
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move, it must be moves_[root_index_],
+ // and all other moves with the same source as moves_[root_index_] are
+ // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index_]. After performing all moves in the tree rooted
+ // in that move, we restore the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(saved_destination_),
+ kSavedDoubleValueRegister);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ vstr(kSavedDoubleValueRegister,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
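Taken together, Resolve, PerformMove, BreakCycle and RestoreValue implement parallel-move resolution with a single spare temporary. The sketch below is a simplified host-side model of that technique over plain ints (the struct and helper names are invented, and the bookkeeping differs slightly: it parks the value about to be clobbered rather than the blocked move's source), not a port of the real LOperand machinery:

    // Simplified model: registers are ints, one spare temp breaks cycles.
    #include <cstdio>
    #include <vector>

    struct Move { int src; int dst; bool pending = false; bool done = false; };

    static void PerformMove(std::vector<Move>& moves, std::vector<int>& regs,
                            int& temp, int index) {
      Move& move = moves[index];
      if (move.done) return;
      move.pending = true;  // detect cycles by running into a pending move
      // First perform every non-pending move that reads this destination.
      for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
        if (!moves[i].done && !moves[i].pending && moves[i].src == move.dst) {
          PerformMove(moves, regs, temp, i);
        }
      }
      // Any remaining reader of this destination is a pending ancestor, i.e.
      // we are in a cycle: park the current value in the temp and redirect.
      for (Move& other : moves) {
        if (!other.done && &other != &move && other.src == move.dst) {
          temp = regs[move.dst];
          other.src = -1;  // -1 means "read the temp instead"
        }
      }
      regs[move.dst] = (move.src == -1) ? temp : regs[move.src];
      move.pending = false;
      move.done = true;
    }

    int main() {
      std::vector<int> regs = {10, 20, 30};                // r0, r1, r2
      std::vector<Move> moves = {{0, 1}, {1, 0}, {1, 2}};  // swap r0/r1; r2 = r1
      int temp = 0;
      for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
        PerformMove(moves, regs, temp, i);
      }
      std::printf("%d %d %d\n", regs[0], regs[1], regs[2]);  // expect: 20 10 20
    }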
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsUint12Encodable()) {
+ // ip is overwritten while saving the value to the destination.
+ // Therefore we can't use ip. It is OK if the read from the source
+ // destroys ip, since that happens before the value is read.
+ __ vldr(kSavedDoubleValueRegister.low(), source_operand);
+ __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
+ } else {
+ __ ldr(ip, source_operand);
+ __ str(ip, destination_operand);
+ }
+ } else {
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ Operand source_operand = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ mov(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ vstr(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kSavedDoubleValueRegister was used to break the cycle,
+ // but kSavedValueRegister is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ __ ldr(kSavedValueRegister, source_high_operand);
+ __ str(kSavedValueRegister, destination_high_operand);
+ } else {
+ __ vldr(kSavedDoubleValueRegister, source_operand);
+ __ vstr(kSavedDoubleValueRegister, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
new file mode 100644
index 0000000000..334d2920b6
--- /dev/null
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -0,0 +1,84 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index eb850cd948..65c92f9e13 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -485,18 +485,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
- str(reg, SafepointRegistersAndDoublesSlot(reg));
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+ Register dst) {
+ str(src, SafepointRegistersAndDoublesSlot(dst));
}
-void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
- str(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ str(src, SafepointRegisterSlot(dst));
}
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
- ldr(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ ldr(dst, SafepointRegisterSlot(src));
}
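With the new signatures the source and destination roles are explicit: the value held in src is written to (or read from) the stack slot that the safepoint frame reserves for dst, so popping the frame materializes that value in dst. A minimal host-side model of the slot bookkeeping (names and layout are illustrative assumptions, not V8's frame format):

    // Minimal model of safepoint slots: every core register gets one stack
    // slot when the frame is pushed; writing into the slot that belongs to a
    // register changes what that register holds after the frame is popped.
    #include <array>
    #include <cstdio>

    constexpr int kNumRegs = 16;
    using Frame = std::array<int, kNumRegs>;

    void StoreToSafepointRegisterSlot(Frame& frame, int src_value, int dst_reg) {
      frame[dst_reg] = src_value;  // slot owned by dst_reg now holds src's value
    }

    int main() {
      std::array<int, kNumRegs> regs{};
      regs[0] = 42;                                  // r0: e.g. a runtime-call result
      Frame frame{};                                 // PushSafepointRegisters()
      for (int i = 0; i < kNumRegs; ++i) frame[i] = regs[i];
      StoreToSafepointRegisterSlot(frame, regs[0], 5);         // route r0's value into r5
      for (int i = 0; i < kNumRegs; ++i) regs[i] = frame[i];   // PopSafepointRegisters()
      std::printf("r5 = %d\n", regs[5]);             // 42
    }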
@@ -745,6 +746,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+#if !defined(USE_ARM_EABI)
+ UNREACHABLE();
+#else
+ vmov(dst, r0, r1);
+#endif
+}
+
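Under the soft-float EABI selected by the #if above, a C function returns a double in the core register pair r0:r1, and vmov(dst, r0, r1) reassembles the two words into a VFP register. The same recombination modeled on the host (assuming a little-endian target where r0 carries the low word; the helper name is illustrative):

    // Recombine the r0:r1 word pair into a 64-bit IEEE double on the host.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    double CombineReturnWords(uint32_t lo /* r0 */, uint32_t hi /* r1 */) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    int main() {
      // 0x3FF8000000000000 is the bit pattern of 1.5.
      std::printf("%g\n", CombineReturnWords(0x00000000u, 0x3FF80000u));  // 1.5
    }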
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -2154,11 +2163,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
}
- // The context may be an intermediate context, not a function context.
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ mov(dst, cp);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ cmp(dst, ip);
+ Check(eq, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
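The debug check relies on a layout invariant: a function context's FCONTEXT slot refers to the context itself, while a 'with' context refers to the enclosing function context, so any context reached by the loop above must satisfy ctx == ctx->fcontext. A toy model of that invariant (the struct is an illustrative assumption; the real slots live in a Context heap object):

    // Toy model of the invariant checked under FLAG_debug_code.
    #include <cstdio>

    struct Context {
      Context* fcontext;  // closest function context
      Context* previous;  // enclosing context in the chain
    };

    bool IsFunctionContext(const Context* ctx) { return ctx->fcontext == ctx; }

    int main() {
      Context function_ctx{nullptr, nullptr};
      function_ctx.fcontext = &function_ctx;          // self-reference
      Context with_ctx{&function_ctx, &function_ctx};
      std::printf("%d %d\n", IsFunctionContext(&function_ctx),   // 1
                             IsFunctionContext(&with_ctx));      // 0
    }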
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 354662da32..83c59a6f65 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -240,12 +240,13 @@ class MacroAssembler: public Assembler {
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
- void StoreToSafepointRegisterSlot(Register reg);
- void StoreToSafepointRegistersAndDoublesSlot(Register reg);
- void LoadFromSafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
- static MemOperand SafepointRegisterSlot(Register reg);
- static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Store the value in register src into the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
@@ -683,6 +684,8 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
+ void GetCFunctionDoubleResult(const DoubleRegister dst);
+
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context.
// stack_space - space to be unwound on exit (includes the call js
@@ -883,10 +886,19 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2);
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
};
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 675fdf49b2..e623ea1914 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -2332,8 +2332,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@@ -2348,8 +2349,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2369,8 +2371,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;