summaryrefslogtreecommitdiff
path: root/deps/v8/src
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src')
-rwxr-xr-xdeps/v8/src/SConscript4
-rw-r--r--deps/v8/src/accessors.cc16
-rw-r--r--deps/v8/src/arm/assembler-arm.cc110
-rw-r--r--deps/v8/src/arm/assembler-arm.h28
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc145
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h26
-rw-r--r--deps/v8/src/arm/codegen-arm.cc4
-rw-r--r--deps/v8/src/arm/cpu-arm.cc5
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc14
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc192
-rw-r--r--deps/v8/src/arm/lithium-arm.cc57
-rw-r--r--deps/v8/src/arm/lithium-arm.h81
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc473
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h77
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.cc303
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.h84
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc42
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h24
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc15
-rw-r--r--deps/v8/src/assembler.cc8
-rw-r--r--deps/v8/src/assembler.h8
-rwxr-xr-xdeps/v8/src/compiler.cc72
-rw-r--r--deps/v8/src/compiler.h5
-rw-r--r--deps/v8/src/cpu-profiler-inl.h7
-rw-r--r--deps/v8/src/cpu-profiler.cc140
-rw-r--r--deps/v8/src/cpu-profiler.h48
-rw-r--r--deps/v8/src/execution.cc5
-rw-r--r--deps/v8/src/flag-definitions.h5
-rw-r--r--deps/v8/src/gdb-jit.cc18
-rw-r--r--deps/v8/src/handles.cc34
-rw-r--r--deps/v8/src/heap.cc198
-rw-r--r--deps/v8/src/heap.h67
-rw-r--r--deps/v8/src/hydrogen-instructions.cc210
-rw-r--r--deps/v8/src/hydrogen-instructions.h1020
-rw-r--r--deps/v8/src/hydrogen.cc952
-rw-r--r--deps/v8/src/hydrogen.h187
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h11
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc4
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc15
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc14
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc31
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc138
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h14
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc51
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h61
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc22
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h15
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc19
-rw-r--r--deps/v8/src/ic.cc93
-rw-r--r--deps/v8/src/ic.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc20
-rw-r--r--deps/v8/src/lithium-allocator.h2
-rw-r--r--deps/v8/src/lithium.h8
-rw-r--r--deps/v8/src/log-utils.cc2
-rw-r--r--deps/v8/src/log.cc151
-rw-r--r--deps/v8/src/log.h26
-rw-r--r--deps/v8/src/mark-compact.cc12
-rw-r--r--deps/v8/src/objects.cc81
-rw-r--r--deps/v8/src/objects.h5
-rw-r--r--deps/v8/src/platform-cygwin.cc42
-rw-r--r--deps/v8/src/platform-freebsd.cc2
-rw-r--r--deps/v8/src/platform-linux.cc2
-rw-r--r--deps/v8/src/platform-macos.cc2
-rw-r--r--deps/v8/src/platform-openbsd.cc2
-rw-r--r--deps/v8/src/platform-solaris.cc2
-rw-r--r--deps/v8/src/platform-win32.cc2
-rw-r--r--deps/v8/src/platform.h14
-rw-r--r--deps/v8/src/profile-generator-inl.h11
-rw-r--r--deps/v8/src/profile-generator.cc78
-rw-r--r--deps/v8/src/profile-generator.h12
-rw-r--r--deps/v8/src/runtime.cc84
-rw-r--r--deps/v8/src/top.cc12
-rw-r--r--deps/v8/src/v8.cc5
-rw-r--r--deps/v8/src/v8natives.js4
-rw-r--r--deps/v8/src/version.cc2
-rw-r--r--deps/v8/src/x64/assembler-x64.cc14
-rw-r--r--deps/v8/src/x64/assembler-x64.h10
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc152
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h25
-rw-r--r--deps/v8/src/x64/codegen-x64.cc15
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc14
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc30
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc829
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h12
-rw-r--r--deps/v8/src/x64/lithium-x64.cc234
-rw-r--r--deps/v8/src/x64/lithium-x64.h156
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc67
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h53
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc15
89 files changed, 4714 insertions, 2669 deletions
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 2e54295e8..34ca91ca6 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
-Import('tools')
SOURCES = {
@@ -154,6 +153,7 @@ SOURCES = {
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
+ arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
@@ -305,7 +305,7 @@ def Abort(message):
def ConfigureObjectFiles():
- env = Environment(tools=tools)
+ env = Environment()
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index f6d1daf67..18264254b 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -446,8 +446,15 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
+ while (!function->should_have_prototype()) {
+ found_it = false;
+ function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
+ &found_it);
+ // There has to be one because we hit the getter.
+ ASSERT(found_it);
+ }
+
if (!function->has_prototype()) {
- if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
@@ -467,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
+ if (!function->should_have_prototype()) {
+ // Since we hit this accessor, object will have no prototype property.
+ return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
+ value,
+ NONE);
+ }
+
if (function->has_initial_map()) {
// If the function has allocated the initial map
// replace it with a copy containing the new prototype.
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index fb9bb488c..c91d4ba2b 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
+
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vldr(const DwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
dst.split_code(&sd, &d);
+ ASSERT(offset >= 0);
+
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vldr(const SwVfpRegister dst,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vldr(dst, operand.rn(), operand.offset(), cond);
}
@@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
+ 0xB*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ }
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
@@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src,
offset = -offset;
u = 0;
}
- ASSERT(offset % 4 == 0);
- ASSERT((offset / 4) < 256);
- ASSERT(offset >= 0);
int sd, d;
src.split_code(&sd, &d);
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
+ ASSERT(offset >= 0);
+ if ((offset % 4) == 0 && (offset / 4) < 256) {
+ emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
+ 0xA*B8 | ((offset / 4) & 255));
+ } else {
+ // Larger offsets must be handled by computing the correct address
+ // in the ip register.
+ ASSERT(!base.is(ip));
+ if (u == 1) {
+ add(ip, base, Operand(offset));
+ } else {
+ sub(ip, base, Operand(offset));
+ }
+ emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
+ }
+}
+
+
+void Assembler::vstr(const SwVfpRegister src,
+ const MemOperand& operand,
+ const Condition cond) {
+ ASSERT(!operand.rm().is_valid());
+ ASSERT(operand.am_ == Offset);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 3941c84b3..954b9cff3 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // Return true of this operand fits in one instruction so that no
+ // Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
bool must_use_constant_pool() const;
@@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED {
offset_ = offset;
}
- uint32_t offset() {
+ uint32_t offset() const {
ASSERT(rm_.is(no_reg));
return offset_;
}
@@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ bool OffsetIsUint12Encodable() const {
+ return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
+ }
+
private:
Register rn_; // base
Register rm_; // register offset
@@ -902,22 +906,34 @@ class Assembler : public Malloced {
void vldr(const DwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const DwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vldr(const SwVfpRegister dst,
+ const MemOperand& src,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const DwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const Register base,
- int offset, // Offset must be a multiple of 4.
+ int offset,
+ const Condition cond = al);
+ void vstr(const SwVfpRegister src,
+ const MemOperand& dst,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index cc49f7e4e..87fa87df0 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
Register result = r5;
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
@@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
- __ AllocateHeapNumber(
- r5, scratch1, scratch2, heap_number_map, gc_required);
+ Register result = r5;
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ }
// r2: Answer as signed int32.
// r5: Heap number to write answer into.
@@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
+ Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
- // Try to add strings before calling runtime.
+ __ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateAddStrings(masm);
}
- GenericBinaryOpStub stub(op_, mode_, r1, r0);
- __ TailCallStub(&stub);
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
Register left = r1;
Register right = r0;
- Label call_runtime;
- // Check if first argument is a string.
- __ JumpIfSmi(left, &call_runtime);
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
+ __ b(ge, &left_not_string);
- // First argument is a a string, test second.
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime);
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
+ __ TailCallStub(&string_add_right_stub);
// At least one argument is not a string.
__ bind(&call_runtime);
@@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// The offset was stored in r4 safepoint slot.
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+ __ LoadFromSafepointRegisterSlot(scratch, r4);
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
@@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
+
// Stack on entry:
- // sp[0]: second argument.
- // sp[4]: first argument.
+ // sp[0]: second argument (right).
+ // sp[4]: first argument (left).
// Load the two arguments.
__ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
- STATIC_ASSERT(kSmiTag == 0);
+ if (flags_ == NO_STRING_ADD_FLAGS) {
__ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(
+ masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
}
// Both arguments are strings.
// r0: first string
// r1: second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
{
Label strings_not_empty;
// Check if either of the strings are empty. In that case return the other.
@@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
@@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ascii strings.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ascii the result is an ascii cons string.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
- // r4: first string instance type (if string_check_)
- // r5: second string instance type (if string_check_)
+ // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+ // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
__ bind(&string_add_flat_result);
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_JS);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
+ __ b(lt, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ str(arg, MemOperand(sp, stack_offset));
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ CompareObjectType(
+ arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
+ __ b(ne, slow);
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+ __ and_(scratch2,
+ scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ cmp(scratch2,
+ Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ b(ne, slow);
+ __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+ __ str(arg, MemOperand(sp, stack_offset));
+
+ __ bind(&done);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index baaa2f2bd..475fbd70e 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index a3921d8ef..3e125a33f 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 507954d9e..51c84b335 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -50,6 +50,11 @@ void CPU::Setup() {
void CPU::FlushICache(void* start, size_t size) {
+ // Nothing to do flushing no instructions.
+ if (size == 0) {
+ return;
+ }
+
#if defined (USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9af7a8d19..9a5aa902b 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index f04a00e05..fea9a8cfb 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, r3, r1, r2);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- // Check the stack for overflow or break request.
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
@@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
+ // Check that we're not inside a 'with'.
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
@@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
- Register current = cp;
+ Register context = cp;
Register next = r3;
Register temp = r4;
@@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
+ __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
// Walk the rest of the chain without clobbering cp.
- current = next;
+ context = next;
}
}
// Check that last extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
- __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return a cp-based operand (the write barrier cannot be allowed to
+ // destroy the cp register).
+ return ContextOperand(context, slot->index());
}
@@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ // Detect const reinitialization by checking for the hole value.
+ __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &skip);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
+ break;
+ case Slot::CONTEXT: {
+ __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ ldr(r2, ContextOperand(r1, slot->index()));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ __ b(ne, &skip);
+ __ str(r0, ContextOperand(r1, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r3, r0); // Preserve the stored value in r0.
+ __ RecordWrite(r1, Operand(offset), r3, r2);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(r0);
+ __ mov(r0, Operand(slot->var()->name()));
+ __ Push(cp, r0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &done);
- }
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ ldr(r2, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &done);
- }
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
@@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(r0); // Value.
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
- __ bind(&done);
}
}
@@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
-
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 903f77bbf..d3c9fee8e 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
- Abort("MathRound LUnaryMathOperation not implemented");
- return NULL;
+ return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
@@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- Abort("LPower instruction not implemented on ARM");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+ // We need to use fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), d2) :
+ UseFixed(instr->right(), r0);
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, d3),
+ instr,
+ CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context = UseTempRegister(instr->context());
+ LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
+ context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
@@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ Abort("DoStorePixelArrayElement not implemented");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
@@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object (we bail out in all other
- // cases).
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 57338f16d..77d6b71a9 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -135,6 +133,7 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
+ V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
+class LPower: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LPower(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+ DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1510,32 +1521,22 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
+ LStoreNamedField(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreNamedField: public LStoreNamed {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+ Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1543,25 +1544,35 @@ class LStoreNamedField: public LStoreNamed {
};
-class LStoreNamedGeneric: public LStoreNamed {
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
+ LStoreNamedGeneric(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-};
+ LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-class LStoreKeyedGeneric: public LStoreKeyed {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 1bfb3ad94..ca6444227 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator {
};
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context.
+ __ mov(r1, Operand(Context::SlotOffset(slot->index())));
+ __ str(r0, MemOperand(cp, r1));
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use two more registers to avoid
+ // clobbering cp.
+ __ mov(r2, Operand(cp));
+ __ RecordWrite(r2, Operand(r1), r3, r0);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) {
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- // TODO(regis): Revisit.
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
@@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
}
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
+
+
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
@@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // d0 must always be a scratch register.
- DoubleRegister dbl_scratch = d0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register core_scratch = scratch0();
- bool destroys_core_scratch = false;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(dbl_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
- if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), core_scratch);
- ASSERT(destroys_core_scratch);
- } else if (to->IsStackSlot()) {
- __ str(core_scratch, ToMemOperand(to));
- ASSERT(destroys_core_scratch);
- } else if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), dbl_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsConstantOperand()) {
- __ mov(core_scratch, ToOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsStackSlot()) {
- __ ldr(core_scratch, ToMemOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsDoubleRegister()) {
- __ vmov(dbl_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- }
- } else if (from->IsConstantOperand()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ mov(ip, ToOperand(from));
- __ str(ip, ToMemOperand(to));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ str(ToRegister(from), ToMemOperand(to));
- }
- } else if (to->IsRegister()) {
- ASSERT(from->IsStackSlot());
- __ ldr(ToRegister(to), ToMemOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ ldr(ip, ToMemOperand(from));
- __ str(ip, ToMemOperand(to));
- } else if (from->IsDoubleRegister()) {
- if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
- }
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- }
-
- if (destroys_core_scratch) {
- __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
+ resolver_.Resolve(move);
}
@@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) {
DeferredModI(LCodeGen* codegen, LModI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD);
}
private:
LModI* instr_;
@@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&ok);
}
- // Try a few common cases before using the generic stub.
+ // Try a few common cases before using the stub.
Label call_stub;
const int kUnfolds = 3;
// Skip if either side is negative.
@@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(result, scratch, Operand(left));
__ bind(&call_stub);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
@@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
DeferredDivI(LCodeGen* codegen, LDivI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
- codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
+ codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
private:
LDivI* instr_;
@@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
__ b(eq, &done);
- // Call the generic stub. The numbers in r0 and r1 have
+ // Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new DeferredDivI(this, instr);
@@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) {
template<int T>
-void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op) {
+void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
- GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
+ // Move left to r1 and right to r0 for the stub call.
+ if (left.is(r1)) {
+ __ Move(r0, right);
+ } else if (left.is(r0) && right.is(r1)) {
+ __ Swap(r0, r1, r2);
+ } else if (left.is(r0)) {
+ ASSERT(!right.is(r1));
+ __ mov(r1, r0);
+ __ mov(r0, right);
+ } else {
+ ASSERT(!left.is(r0) && !right.is(r0));
+ __ mov(r0, right);
+ __ mov(r1, left);
+ }
+ TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0);
+ __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
__ PopSafepointRegistersAndDoubles();
}
@@ -1413,7 +1226,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vmov(r2, r3, right);
__ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
// Move the result in the double result register.
- __ vmov(ToDoubleRegister(instr->result()), r0, r1);
+ __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1431,10 +1244,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
- // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
- // GenericBinaryOpStub:
- // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
- GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
+ TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -2174,7 +1984,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
- __ StoreToSafepointRegisterSlot(temp);
+ __ StoreToSafepointRegisterSlot(temp, temp);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->InstructionsGeneratedSince(&before_push_delta));
@@ -2182,7 +1992,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result);
+ __ StoreToSafepointRegisterSlot(result, result);
__ PopSafepointRegisters();
}
@@ -2302,17 +2112,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ ldr(result, ContextOperand(result, instr->slot_index()));
+ __ ldr(result, ContextOperand(context, instr->slot_index()));
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ ldr(context,
- MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
@@ -2715,7 +2521,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input);
+ __ LoadFromSafepointRegisterSlot(input, input);
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ bind(&allocated);
@@ -2726,7 +2532,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ str(tmp1, masm()->SafepointRegisterSlot(input));
+ __ StoreToSafepointRegisterSlot(tmp1, input);
__ PopSafepointRegisters();
__ bind(&done);
@@ -2843,6 +2649,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch1 = scratch0();
+ Register scratch2 = result;
+ EmitVFPTruncate(kRoundToNearest,
+ double_scratch0().low(),
+ input,
+ scratch1,
+ scratch2);
+ DeoptimizeIf(ne, instr->environment());
+ __ vmov(result, double_scratch0().low());
+
+ // Test for -0.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
+ __ vmov(scratch1, input.high());
+ __ tst(scratch1, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
+}
+
+
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input));
@@ -2850,6 +2680,64 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoPower(LPower* instr) {
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ Register scratch = scratch0();
+ DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(4, scratch);
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ vmov(r2, r3, ToDoubleRegister(right));
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ } else if (exponent_type.IsInteger32()) {
+ ASSERT(ToRegister(right).is(r0));
+ // Prepare arguments and call C function.
+ __ PrepareCallCFunction(4, scratch);
+ __ mov(r2, ToRegister(right));
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+ Register right_reg = ToRegister(right);
+
+ // Check for smi on the right hand side.
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+
+ // Untag smi and convert it to a double.
+ __ SmiUntag(right_reg);
+ SwVfpRegister single_scratch = double_scratch0().low();
+ __ vmov(single_scratch, right_reg);
+ __ vcvt_f64_s32(result_reg, single_scratch);
+ __ jmp(&call);
+
+ // Heap number map check.
+ __ bind(&non_smi);
+ __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+ int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
+ __ add(scratch, right_reg, Operand(value_offset));
+ __ vldr(result_reg, scratch, 0);
+
+ // Prepare arguments and call C function.
+ __ bind(&call);
+ __ PrepareCallCFunction(4, scratch);
+ __ vmov(r0, r1, ToDoubleRegister(left));
+ __ vmov(r2, r3, result_reg);
+ __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+ }
+ // Store the result in the result register.
+ __ GetCFunctionDoubleResult(result_reg);
+}
+
+
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@@ -2858,6 +2746,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathFloor:
DoMathFloor(instr);
break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
case kMathSqrt:
DoMathSqrt(instr);
break;
@@ -3157,8 +3048,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ AbortIfNotSmi(r0);
}
__ SmiUntag(r0);
- MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
- __ str(r0, result_stack_slot);
+ __ StoreToSafepointRegisterSlot(r0, result);
__ PopSafepointRegisters();
}
@@ -3239,9 +3129,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(ip, Operand(0));
- int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
- __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
-
+ __ StoreToSafepointRegisterSlot(ip, reg);
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@@ -3252,7 +3140,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ bind(&done);
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
+ __ StoreToSafepointRegisterSlot(reg, reg);
__ PopSafepointRegisters();
}
@@ -3297,8 +3185,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
- __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
+ __ StoreToSafepointRegisterSlot(r0, reg);
__ PopSafepointRegisters();
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 732db4451..2d9c6edcb 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -29,7 +29,7 @@
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
-
+#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
@@ -39,31 +39,8 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -79,10 +56,35 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int ToInteger32(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -94,8 +96,8 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
template<int T>
- void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
- Token::Value op);
+ void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
@@ -136,7 +138,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
@@ -202,24 +203,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
-
- int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
-
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
@@ -229,6 +212,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch1,
Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
+ void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
@@ -237,6 +221,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
new file mode 100644
index 000000000..1a2326b74
--- /dev/null
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -0,0 +1,303 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-gap-resolver-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = { 9 };
+static const DoubleRegister kSavedDoubleValueRegister = { 0 };
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL) { }
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+ // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) require us to spill this spilled value to
+ // the stack, to free up the register.
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+ // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index]. After performing all moves in the tree rooted
+ // in that move, we save the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(saved_destination_),
+ kSavedDoubleValueRegister);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ vstr(kSavedDoubleValueRegister,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsUint12Encodable()) {
+ // ip is overwritten while saving the value to the destination.
+ // Therefore we can't use ip. It is OK if the read from the source
+ // destroys ip, since that happens before the value is read.
+ __ vldr(kSavedDoubleValueRegister.low(), source_operand);
+ __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
+ } else {
+ __ ldr(ip, source_operand);
+ __ str(ip, destination_operand);
+ }
+ } else {
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ Operand source_operand = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ mov(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ vstr(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kSavedDoubleValueRegister was used to break the cycle,
+ // but kSavedValueRegister is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ __ ldr(kSavedValueRegister, source_high_operand);
+ __ str(kSavedValueRegister, destination_high_operand);
+ } else {
+ __ vldr(kSavedDoubleValueRegister, source_operand);
+ __ vstr(kSavedDoubleValueRegister, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
new file mode 100644
index 000000000..334d2920b
--- /dev/null
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -0,0 +1,84 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index eb850cd94..65c92f9e1 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -485,18 +485,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
- str(reg, SafepointRegistersAndDoublesSlot(reg));
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+ Register dst) {
+ str(src, SafepointRegistersAndDoublesSlot(dst));
}
-void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
- str(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+ str(src, SafepointRegisterSlot(dst));
}
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
- ldr(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ ldr(dst, SafepointRegisterSlot(src));
}
@@ -745,6 +746,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+#if !defined(USE_ARM_EABI)
+ UNREACHABLE();
+#else
+ vmov(dst, r0, r1);
+#endif
+}
+
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@@ -2154,11 +2163,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
}
- // The context may be an intermediate context, not a function context.
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in cp).
+ mov(dst, cp);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ cmp(dst, ip);
+ Check(eq, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 354662da3..83c59a6f6 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -240,12 +240,13 @@ class MacroAssembler: public Assembler {
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
- void StoreToSafepointRegisterSlot(Register reg);
- void StoreToSafepointRegistersAndDoublesSlot(Register reg);
- void LoadFromSafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
- static MemOperand SafepointRegisterSlot(Register reg);
- static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Store value in register src in the safepoint stack slot for
+ // register dst.
+ void StoreToSafepointRegisterSlot(Register src, Register dst);
+ void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+ // Load the value of the src register from its safepoint stack slot
+ // into register dst.
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
@@ -683,6 +684,8 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
+ void GetCFunctionDoubleResult(const DoubleRegister dst);
+
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context.
// stack_space - space to be unwound on exit (includes the call js
@@ -883,10 +886,19 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2);
+ // Compute memory operands for safepoint stack slots.
+ static int SafepointRegisterStackIndex(int reg_code);
+ MemOperand SafepointRegisterSlot(Register reg);
+ MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
};
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 675fdf49b..e623ea191 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -2332,8 +2332,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@@ -2348,8 +2349,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2369,8 +2371,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 42a61c2b8..a323ecaa4 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
+ ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for data.
intptr_t data_delta = rinfo->data() - last_data_;
@@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpTag);
WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
last_data_ = rinfo->data();
+ ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
} else {
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
@@ -850,12 +852,14 @@ double power_double_double(double x, double y) {
ExternalReference ExternalReference::power_double_double_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
+ return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
+ FP_RETURN_CALL));
}
ExternalReference ExternalReference::power_double_int_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
+ return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
+ FP_RETURN_CALL));
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 1b71dfc5a..095859840 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -184,6 +184,14 @@ class RelocInfo BASE_EMBEDDED {
// we do not normally record relocation info.
static const char* kFillerCommentString;
+ // The size of a comment is equal to three bytes for the extra tagged pc +
+ // the tag for the data, and kPointerSize for the actual pointer to the
+ // comment.
+ static const int kRelocCommentSize = 3 + kPointerSize;
+
+ // The maximum size for a call instruction including pc-jump.
+ static const int kMaxCallSize = 6;
+
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index ae7b2b9f9..f392cceb3 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -261,10 +261,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
- Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
- Handle<String>(shared->DebugName()),
- shared->start_position(),
- &unoptimized);
+ Compiler::RecordFunctionCompilation(
+ Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
}
@@ -273,7 +271,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
- ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
+ ASSERT(FLAG_always_opt || code->optimizable());
ASSERT(info->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
@@ -283,8 +281,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
TypeFeedbackOracle oracle(
- Handle<Code>(info->shared_info()->code()),
- Handle<Context>(info->closure()->context()->global_context()));
+ code, Handle<Context>(info->closure()->context()->global_context()));
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
@@ -294,9 +291,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
if (graph != NULL && FLAG_build_lithium) {
- Handle<Code> code = graph->Compile();
- if (!code.is_null()) {
- info->SetCode(code);
+ Handle<Code> optimized_code = graph->Compile();
+ if (!optimized_code.is_null()) {
+ info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
return true;
}
@@ -415,13 +412,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
return Handle<SharedFunctionInfo>::null();
}
+ // Allocate function.
ASSERT(!info->code().is_null());
+ Handle<SharedFunctionInfo> result =
+ Factory::NewSharedFunctionInfo(
+ lit->name(),
+ lit->materialized_literal_count(),
+ info->code(),
+ SerializedScopeInfo::Create(info->scope()));
+
+ ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ Compiler::SetFunctionInfo(result, lit, true, script);
+
if (script->name()->IsString()) {
PROFILE(CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
+ *result,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
@@ -432,21 +441,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
- ""));
+ *result,
+ Heap::empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
- // Allocate function.
- Handle<SharedFunctionInfo> result =
- Factory::NewSharedFunctionInfo(
- lit->name(),
- lit->materialized_literal_count(),
- info->code(),
- SerializedScopeInfo::Create(info->scope()));
-
- ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- Compiler::SetFunctionInfo(result, lit, true, script);
-
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
@@ -613,10 +612,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
Handle<JSFunction> function = info->closure();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
- Handle<String>(shared->DebugName()),
- shared->start_position(),
- info);
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
function->ReplaceCode(*code);
@@ -724,10 +720,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
ASSERT(!info.code().is_null());
// Function compilation complete.
- RecordFunctionCompilation(Logger::FUNCTION_TAG,
- literal->debug_name(),
- literal->start_position(),
- &info);
scope_info = SerializedScopeInfo::Create(info.scope());
}
@@ -738,6 +730,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.code(),
scope_info);
SetFunctionInfo(result, literal, false, script);
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
// Set the expected number of properties for instances and return
@@ -776,28 +769,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- Handle<String> name,
- int start_position,
- CompilationInfo* info) {
+ CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared) {
+ // The SharedFunctionInfo is passed separately, because if the
+ // CompilationInfo was created from a Script object, it will not have one.
+
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
- if (Logger::is_logging() ||
- CpuProfiler::is_profiling()) {
+ if (Logger::is_logging() || CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
+ if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
if (script->name()->IsString()) {
- int line_num = GetScriptLineNumber(script, start_position) + 1;
+ int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
- *name,
+ *shared,
String::cast(script->name()),
line_num));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
- *name));
+ *shared,
+ shared->DebugName()));
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 239bea35c..e0a437ac6 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -265,9 +265,8 @@ class Compiler : public AllStatic {
#endif
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- Handle<String> name,
- int start_position,
- CompilationInfo* info);
+ CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared);
};
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 5df5893f8..440dedca6 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -41,6 +41,9 @@ namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
+ if (sfi_address != NULL) {
+ entry->set_shared_id(code_map->GetSFITag(sfi_address));
+ }
}
@@ -54,8 +57,8 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->AddAlias(start, entry, code_start);
+void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+ code_map->MoveCode(from, to);
}
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index fcf539f3b..ad04a003b 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -53,13 +53,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
- enqueue_order_(0),
- known_functions_(new HashMap(AddressesMatch)) {
-}
-
-
-ProfilerEventsProcessor::~ProfilerEventsProcessor() {
- delete known_functions_;
+ enqueue_order_(0) {
}
@@ -75,6 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = 1;
+ rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -84,7 +79,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
String* resource_name,
int line_number,
Address start,
- unsigned size) {
+ unsigned size,
+ Address sfi_address) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -93,6 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
rec->size = size;
+ rec->sfi_address = sfi_address;
events_buffer_.Enqueue(evt_rec);
}
@@ -109,6 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name);
rec->size = size;
+ rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -125,6 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, args_count);
rec->size = size;
+ rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@@ -150,57 +149,14 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
}
-void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
- Address start,
- int security_token_id) {
+void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) {
CodeEventsContainer evt_rec;
- CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
- rec->type = CodeEventRecord::CODE_ALIAS;
+ SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_;
+ rec->type = CodeEventRecord::SFI_MOVE;
rec->order = ++enqueue_order_;
- rec->start = alias;
- rec->entry = generator_->NewCodeEntry(security_token_id);
- rec->code_start = start;
+ rec->from = from;
+ rec->to = to;
events_buffer_.Enqueue(evt_rec);
-
- known_functions_->Lookup(alias, AddressHash(alias), true);
-}
-
-
-void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
- CodeMoveEvent(from, to);
-
- if (IsKnownFunction(from)) {
- known_functions_->Remove(from, AddressHash(from));
- known_functions_->Lookup(to, AddressHash(to), true);
- }
-}
-
-
-void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
- CodeDeleteEvent(from);
-
- known_functions_->Remove(from, AddressHash(from));
-}
-
-
-bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
- HashMap::Entry* entry =
- known_functions_->Lookup(start, AddressHash(start), false);
- return entry != NULL;
-}
-
-
-void ProfilerEventsProcessor::ProcessMovedFunctions() {
- for (int i = 0; i < moved_functions_.length(); ++i) {
- JSFunction* function = moved_functions_[i];
- CpuProfiler::FunctionCreateEvent(function);
- }
- moved_functions_.Clear();
-}
-
-
-void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
- moved_functions_.Add(function);
}
@@ -227,13 +183,12 @@ void ProfilerEventsProcessor::AddCurrentStack() {
TickSample* sample = &record.sample;
sample->state = Top::current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
+ sample->tos = NULL;
sample->frames_count = 0;
for (StackTraceFrameIterator it;
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- sample->stack[sample->frames_count++] =
- reinterpret_cast<Address>(frame->function());
+ sample->stack[sample->frames_count++] = it.frame()->pc();
}
record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
@@ -393,20 +348,38 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Heap::empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
- code->ExecutableSize());
+ code->ExecutableSize(),
+ NULL);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name,
- String* source, int line) {
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* name) {
singleton_->processor_->CodeCreateEvent(
tag,
name,
+ Heap::empty_string(),
+ v8::CpuProfileNode::kNoLineNumberInfo,
+ code->address(),
+ code->ExecutableSize(),
+ shared->address());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* source, int line) {
+ singleton_->processor_->CodeCreateEvent(
+ tag,
+ shared->DebugName(),
source,
line,
code->address(),
- code->ExecutableSize());
+ code->ExecutableSize(),
+ shared->address());
}
@@ -430,44 +403,8 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
}
-void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
- int security_token_id = TokenEnumerator::kNoSecurityToken;
- if (function->unchecked_context()->IsContext()) {
- security_token_id = singleton_->token_enumerator_->GetTokenId(
- function->context()->global_context()->security_token());
- }
- singleton_->processor_->FunctionCreateEvent(
- function->address(),
- function->shared()->code()->address(),
- security_token_id);
-}
-
-
-void CpuProfiler::ProcessMovedFunctions() {
- singleton_->processor_->ProcessMovedFunctions();
-}
-
-
-void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
- // This function is called from GC iterators (during Scavenge,
- // MC, and MS), so marking bits can be set on objects. That's
- // why unchecked accessors are used here.
-
- // The same function can be reported several times.
- if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
- || singleton_->processor_->IsKnownFunction(function->address())) return;
-
- singleton_->processor_->RememberMovedFunction(function);
-}
-
-
-void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
- singleton_->processor_->FunctionMoveEvent(from, to);
-}
-
-
-void CpuProfiler::FunctionDeleteEvent(Address from) {
- singleton_->processor_->FunctionDeleteEvent(from);
+void CpuProfiler::SFIMoveEvent(Address from, Address to) {
+ singleton_->processor_->SFIMoveEvent(from, to);
}
@@ -539,7 +476,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
FLAG_log_code = saved_log_code_flag;
}
Logger::LogCompiledFunctions();
- Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 10165f67c..1ebbfebf7 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -50,7 +50,7 @@ class TokenEnumerator;
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord) \
- V(CODE_ALIAS, CodeAliasEventRecord)
+ V(SFI_MOVE, SFIMoveEventRecord)
class CodeEventRecord {
@@ -73,6 +73,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
Address start;
CodeEntry* entry;
unsigned size;
+ Address sfi_address;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -95,11 +96,10 @@ class CodeDeleteEventRecord : public CodeEventRecord {
};
-class CodeAliasEventRecord : public CodeEventRecord {
+class SFIMoveEventRecord : public CodeEventRecord {
public:
- Address start;
- CodeEntry* entry;
- Address code_start;
+ Address from;
+ Address to;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -134,7 +134,7 @@ class TickSampleEventRecord BASE_EMBEDDED {
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
- virtual ~ProfilerEventsProcessor();
+ virtual ~ProfilerEventsProcessor() {}
// Thread control.
virtual void Run();
@@ -148,7 +148,8 @@ class ProfilerEventsProcessor : public Thread {
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number,
- Address start, unsigned size);
+ Address start, unsigned size,
+ Address sfi_address);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@@ -157,17 +158,12 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
- void FunctionCreateEvent(Address alias, Address start, int security_token_id);
- void FunctionMoveEvent(Address from, Address to);
- void FunctionDeleteEvent(Address from);
+ void SFIMoveEvent(Address from, Address to);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
- bool IsKnownFunction(Address start);
- void ProcessMovedFunctions();
- void RememberMovedFunction(JSFunction* function);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -188,13 +184,6 @@ class ProfilerEventsProcessor : public Thread {
bool ProcessTicks(unsigned dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
- INLINE(static bool AddressesMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
- INLINE(static uint32_t AddressHash(Address addr)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
- }
ProfileGenerator* generator_;
bool running_;
@@ -202,10 +191,6 @@ class ProfilerEventsProcessor : public Thread {
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
-
- // Used from the VM thread.
- HashMap* known_functions_;
- List<JSFunction*> moved_functions_;
};
} } // namespace v8::internal
@@ -251,23 +236,22 @@ class CpuProfiler {
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
- Code* code, String* name,
+ Code* code,
+ SharedFunctionInfo *shared,
+ String* name);
+ static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo *shared,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
static void CodeMovingGCEvent() {}
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
- static void FunctionCreateEvent(JSFunction* function);
- // Reports function creation in case we had missed it (e.g.
- // if it was created from compiled code).
- static void FunctionCreateEventFromMove(JSFunction* function);
- static void FunctionMoveEvent(Address from, Address to);
- static void FunctionDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
- static void ProcessMovedFunctions();
static void SetterCallbackEvent(String* name, Address entry_point);
+ static void SFIMoveEvent(Address from, Address to);
static INLINE(bool is_profiling()) {
return NoBarrier_Load(&is_profiling_);
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index f484d8d9b..de8f0a466 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -106,6 +106,11 @@ static Handle<Object> Invoke(bool construct,
ASSERT(*has_pending_exception == Top::has_pending_exception());
if (*has_pending_exception) {
Top::ReportPendingMessages();
+ if (Top::pending_exception() == Failure::OutOfMemoryException()) {
+ if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) {
+ V8::FatalProcessOutOfMemory("JS", true);
+ }
+ }
return Handle<Object>();
} else {
Top::clear_pending_message();
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 6a5e2a575..96f63c530 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -120,6 +120,7 @@ DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
+DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
@@ -134,7 +135,11 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
+#ifdef V8_TARGET_ARCH_X64
+DEFINE_bool(use_osr, false, "use on-stack replacement")
+#else
DEFINE_bool(use_osr, true, "use on-stack replacement")
+#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 88a993972..5136deddb 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1411,9 +1411,8 @@ static void AddUnwindInfo(CodeDescription *desc) {
#ifdef V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through
- // compilation pipeline we rely on function prologue
- // and epilogue being the same for all code objects generated
- // by the full code generator.
+ // compilation pipeline we use an approximation.
+ // For most use cases this should not affect usability.
static const int kFramePointerPushOffset = 1;
static const int kFramePointerSetOffset = 4;
static const int kFramePointerPopOffset = -3;
@@ -1427,19 +1426,6 @@ static void AddUnwindInfo(CodeDescription *desc) {
uintptr_t frame_pointer_pop_address =
desc->CodeEnd() + kFramePointerPopOffset;
-#ifdef DEBUG
- static const uint8_t kFramePointerPushInstruction = 0x48; // push ebp
- static const uint16_t kFramePointerSetInstruction = 0x5756; // mov ebp, esp
- static const uint8_t kFramePointerPopInstruction = 0xBE; // pop ebp
-
- ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_push_address) ==
- kFramePointerPushInstruction);
- ASSERT(*reinterpret_cast<uint16_t*>(frame_pointer_set_address) ==
- kFramePointerSetInstruction);
- ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_pop_address) ==
- kFramePointerPopInstruction);
-#endif
-
desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
frame_pointer_push_address);
desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index d625d644c..b48aa507e 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -834,49 +834,39 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
}
-bool CompileLazy(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
+static bool CompileLazyFunction(Handle<JSFunction> function,
+ ClearExceptionFlag flag,
+ InLoopFlag in_loop_flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
CompilationInfo info(function);
+ if (in_loop_flag == IN_LOOP) info.MarkAsInLoop();
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
- if (result && function->is_compiled()) {
- PROFILE(FunctionCreateEvent(*function));
- }
return result;
}
+bool CompileLazy(Handle<JSFunction> function,
+ ClearExceptionFlag flag) {
+ return CompileLazyFunction(function, flag, NOT_IN_LOOP);
+}
+
+
bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
- bool result = true;
- if (function->shared()->is_compiled()) {
- function->ReplaceCode(function->shared()->code());
- function->shared()->set_code_age(0);
- } else {
- CompilationInfo info(function);
- info.MarkAsInLoop();
- result = CompileLazyHelper(&info, flag);
- ASSERT(!result || function->is_compiled());
- }
- if (result && function->is_compiled()) {
- PROFILE(FunctionCreateEvent(*function));
- }
- return result;
+ return CompileLazyFunction(function, flag, IN_LOOP);
}
bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
CompilationInfo info(function);
info.SetOptimizing(osr_ast_id);
- bool result = CompileLazyHelper(&info, KEEP_EXCEPTION);
- if (result) PROFILE(FunctionCreateEvent(*function));
- return result;
+ return CompileLazyHelper(&info, KEEP_EXCEPTION);
}
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index f88ebda53..1fadec383 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -134,7 +134,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
-int Heap::gc_count_ = 0;
+unsigned int Heap::gc_count_ = 0;
GCTracer* Heap::tracer_ = NULL;
@@ -515,7 +515,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) HeapProfiler::WriteSample();
- if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif
return next_gc_likely_to_collect_more;
@@ -1350,9 +1349,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
if (Logger::is_logging() || CpuProfiler::is_profiling()) {
- if (target->IsJSFunction()) {
- PROFILE(FunctionMoveEvent(source->address(), target->address()));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
+ if (target->IsSharedFunctionInfo()) {
+ PROFILE(SFIMoveEvent(source->address(), target->address()));
}
}
#endif
@@ -2924,9 +2922,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// constructor to the function.
Object* result;
{ MaybeObject* maybe_result =
- JSObject::cast(prototype)->SetProperty(constructor_symbol(),
- function,
- DONT_ENUM);
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ constructor_symbol(), function, DONT_ENUM);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return prototype;
@@ -3797,9 +3794,9 @@ bool Heap::IdleNotification() {
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
- static const int kGCsBetweenCleanup = 4;
+ static const unsigned int kGCsBetweenCleanup = 4;
static int number_idle_notifications = 0;
- static int last_gc_count = gc_count_;
+ static unsigned int last_gc_count = gc_count_;
bool uncommit = true;
bool finished = false;
@@ -3808,7 +3805,7 @@ bool Heap::IdleNotification() {
// GCs have taken place. This allows another round of cleanup based
// on idle notifications if enough work has been carried out to
// provoke a number of garbage collections.
- if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
+ if (gc_count_ - last_gc_count < kGCsBetweenCleanup) {
number_idle_notifications =
Min(number_idle_notifications + 1, kMaxIdleCount);
} else {
@@ -5182,32 +5179,77 @@ void HeapIterator::reset() {
}
-#ifdef DEBUG
+#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+
+Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
-static bool search_for_any_global;
-static Object* search_target;
-static bool found_target;
-static List<Object*> object_stack(20);
+class PathTracer::MarkVisitor: public ObjectVisitor {
+ public:
+ explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
+ void VisitPointers(Object** start, Object** end) {
+ // Scan all HeapObject pointers in [start, end)
+ for (Object** p = start; !tracer_->found() && (p < end); p++) {
+ if ((*p)->IsHeapObject())
+ tracer_->MarkRecursively(p, this);
+ }
+ }
+ private:
+ PathTracer* tracer_;
+};
-// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
-static const int kMarkTag = 2;
-static void MarkObjectRecursively(Object** p);
-class MarkObjectVisitor : public ObjectVisitor {
+class PathTracer::UnmarkVisitor: public ObjectVisitor {
public:
+ explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
+ // Scan all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
- MarkObjectRecursively(p);
+ tracer_->UnmarkRecursively(p, this);
}
}
+
+ private:
+ PathTracer* tracer_;
};
-static MarkObjectVisitor mark_visitor;
-static void MarkObjectRecursively(Object** p) {
+void PathTracer::VisitPointers(Object** start, Object** end) {
+ bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
+ // Visit all HeapObject pointers in [start, end)
+ for (Object** p = start; !done && (p < end); p++) {
+ if ((*p)->IsHeapObject()) {
+ TracePathFrom(p);
+ done = ((what_to_find_ == FIND_FIRST) && found_target_);
+ }
+ }
+}
+
+
+void PathTracer::Reset() {
+ found_target_ = false;
+ object_stack_.Clear();
+}
+
+
+void PathTracer::TracePathFrom(Object** root) {
+ ASSERT((search_target_ == kAnyGlobalObject) ||
+ search_target_->IsHeapObject());
+ found_target_in_trace_ = false;
+ object_stack_.Clear();
+
+ MarkVisitor mark_visitor(this);
+ MarkRecursively(root, &mark_visitor);
+
+ UnmarkVisitor unmark_visitor(this);
+ UnmarkRecursively(root, &unmark_visitor);
+
+ ProcessResults();
+}
+
+
+void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
@@ -5216,14 +5258,17 @@ static void MarkObjectRecursively(Object** p) {
if (!map->IsHeapObject()) return; // visited before
- if (found_target) return; // stop if target found
- object_stack.Add(obj);
- if ((search_for_any_global && obj->IsJSGlobalObject()) ||
- (!search_for_any_global && (obj == search_target))) {
- found_target = true;
+ if (found_target_in_trace_) return; // stop if target found
+ object_stack_.Add(obj);
+ if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
+ (obj == search_target_)) {
+ found_target_in_trace_ = true;
+ found_target_ = true;
return;
}
+ bool is_global_context = obj->IsGlobalContext();
+
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -5231,31 +5276,30 @@ static void MarkObjectRecursively(Object** p) {
obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
- MarkObjectRecursively(&map);
+ // Scan the object body.
+ if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+ // This is specialized to scan Context's properly.
+ Object** start = reinterpret_cast<Object**>(obj->address() +
+ Context::kHeaderSize);
+ Object** end = reinterpret_cast<Object**>(obj->address() +
+ Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
+ mark_visitor->VisitPointers(start, end);
+ } else {
+ obj->IterateBody(map_p->instance_type(),
+ obj->SizeFromMap(map_p),
+ mark_visitor);
+ }
- obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
- &mark_visitor);
+ // Scan the map after the body because the body is a lot more interesting
+ // when doing leak detection.
+ MarkRecursively(&map, mark_visitor);
- if (!found_target) // don't pop if found the target
- object_stack.RemoveLast();
+ if (!found_target_in_trace_) // don't pop if found the target
+ object_stack_.RemoveLast();
}
-static void UnmarkObjectRecursively(Object** p);
-class UnmarkObjectVisitor : public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- UnmarkObjectRecursively(p);
- }
- }
-};
-
-static UnmarkObjectVisitor unmark_visitor;
-
-static void UnmarkObjectRecursively(Object** p) {
+void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
@@ -5274,63 +5318,38 @@ static void UnmarkObjectRecursively(Object** p) {
obj->set_map(reinterpret_cast<Map*>(map_p));
- UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+ UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
obj->IterateBody(Map::cast(map_p)->instance_type(),
obj->SizeFromMap(Map::cast(map_p)),
- &unmark_visitor);
+ unmark_visitor);
}
-static void MarkRootObjectRecursively(Object** root) {
- if (search_for_any_global) {
- ASSERT(search_target == NULL);
- } else {
- ASSERT(search_target->IsHeapObject());
- }
- found_target = false;
- object_stack.Clear();
-
- MarkObjectRecursively(root);
- UnmarkObjectRecursively(root);
-
- if (found_target) {
+void PathTracer::ProcessResults() {
+ if (found_target_) {
PrintF("=====================================\n");
PrintF("==== Path to object ====\n");
PrintF("=====================================\n\n");
- ASSERT(!object_stack.is_empty());
- for (int i = 0; i < object_stack.length(); i++) {
+ ASSERT(!object_stack_.is_empty());
+ for (int i = 0; i < object_stack_.length(); i++) {
if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack[i];
+ Object* obj = object_stack_[i];
obj->Print();
}
PrintF("=====================================\n");
}
}
+#endif // DEBUG || LIVE_OBJECT_LIST
-// Helper class for visiting HeapObjects recursively.
-class MarkRootVisitor: public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- MarkRootObjectRecursively(p);
- }
- }
-};
-
-
+#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
- search_target = target;
- search_for_any_global = false;
-
- MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
+ PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+ IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
@@ -5338,11 +5357,10 @@ void Heap::TracePathToObject(Object* target) {
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
- search_target = NULL;
- search_for_any_global = true;
-
- MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
+ PathTracer tracer(PathTracer::kAnyGlobalObject,
+ PathTracer::FIND_ALL,
+ VISIT_ALL);
+ IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index f50c3f9ac..163eb0444 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -30,6 +30,8 @@
#include <math.h>
+#include "globals.h"
+#include "list.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -1180,7 +1182,7 @@ class Heap : public AllStatic {
static int mc_count_; // how many mark-compact collections happened
static int ms_count_; // how many mark-sweep collections happened
- static int gc_count_; // how many gc happened
+ static unsigned int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
static int unflattened_strings_length_;
@@ -1907,7 +1909,7 @@ class GCTracer BASE_EMBEDDED {
void set_collector(GarbageCollector collector) { collector_ = collector; }
// Sets the GC count.
- void set_gc_count(int count) { gc_count_ = count; }
+ void set_gc_count(unsigned int count) { gc_count_ = count; }
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
@@ -1950,7 +1952,7 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one, eg, the first collection is 1) of the
// number of garbage collections.
- int gc_count_;
+ unsigned int gc_count_;
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
@@ -2152,6 +2154,65 @@ class WeakObjectRetainer {
};
+#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+// Helper class for tracing paths to a search target Object from all roots.
+// The TracePathFrom() method can be used to trace paths from a specific
+// object to the search target object.
+class PathTracer : public ObjectVisitor {
+ public:
+ enum WhatToFind {
+ FIND_ALL, // Will find all matches.
+ FIND_FIRST // Will stop the search after first match.
+ };
+
+ // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
+ // after the first match. If FIND_ALL is specified, then tracing will be
+ // done for all matches.
+ PathTracer(Object* search_target,
+ WhatToFind what_to_find,
+ VisitMode visit_mode)
+ : search_target_(search_target),
+ found_target_(false),
+ found_target_in_trace_(false),
+ what_to_find_(what_to_find),
+ visit_mode_(visit_mode),
+ object_stack_(20),
+ no_alloc() {}
+
+ virtual void VisitPointers(Object** start, Object** end);
+
+ void Reset();
+ void TracePathFrom(Object** root);
+
+ bool found() const { return found_target_; }
+
+ static Object* const kAnyGlobalObject;
+
+ protected:
+ class MarkVisitor;
+ class UnmarkVisitor;
+
+ void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
+ void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
+ virtual void ProcessResults();
+
+ // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+ static const int kMarkTag = 2;
+
+ Object* search_target_;
+ bool found_target_;
+ bool found_target_in_trace_;
+ WhatToFind what_to_find_;
+ VisitMode visit_mode_;
+ List<Object*> object_stack_;
+
+ AssertNoAllocation no_alloc; // i.e. no gc allowed.
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
+};
+#endif // DEBUG || LIVE_OBJECT_LIST
+
+
} } // namespace v8::internal
#endif // V8_HEAP_H_
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 5accc77f0..c5a7146eb 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -57,10 +57,13 @@ const char* Representation::Mnemonic() const {
case kTagged: return "t";
case kDouble: return "d";
case kInteger32: return "i";
- default:
+ case kExternal: return "x";
+ case kNumRepresentations:
UNREACHABLE();
return NULL;
}
+ UNREACHABLE();
+ return NULL;
}
@@ -221,7 +224,7 @@ HType HType::TypeFromValue(Handle<Object> value) {
}
-int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const {
+int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
for (int i = 0; i < OperandCount(); ++i) {
if (OperandAt(i) == op) {
if (occurrence_index == 0) return i;
@@ -237,7 +240,7 @@ bool HValue::IsDefinedAfter(HBasicBlock* other) const {
}
-bool HValue::UsesMultipleTimes(HValue* op) const {
+bool HValue::UsesMultipleTimes(HValue* op) {
bool seen = false;
for (int i = 0; i < OperandCount(); ++i) {
if (OperandAt(i) == op) {
@@ -249,7 +252,7 @@ bool HValue::UsesMultipleTimes(HValue* op) const {
}
-bool HValue::Equals(HValue* other) const {
+bool HValue::Equals(HValue* other) {
if (other->opcode() != opcode()) return false;
if (!other->representation().Equals(representation())) return false;
if (!other->type_.Equals(type_)) return false;
@@ -264,7 +267,7 @@ bool HValue::Equals(HValue* other) const {
}
-intptr_t HValue::Hashcode() const {
+intptr_t HValue::Hashcode() {
intptr_t result = opcode();
int count = OperandCount();
for (int i = 0; i < count; ++i) {
@@ -281,33 +284,6 @@ void HValue::SetOperandAt(int index, HValue* value) {
}
-void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
- if (index < 2) {
- operands_[index] = value;
- } else {
- context_ = value;
- }
-}
-
-
-void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
- if (index < 3) {
- operands_[index] = value;
- } else {
- context_ = value;
- }
-}
-
-
-void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) {
- if (index < 2) {
- operands_[index] = value;
- } else {
- context_ = value;
- }
-}
-
-
void HValue::ReplaceAndDelete(HValue* other) {
ReplaceValue(other);
Delete();
@@ -438,7 +414,7 @@ void HValue::ComputeInitialRange() {
}
-void HInstruction::PrintTo(StringStream* stream) const {
+void HInstruction::PrintTo(StringStream* stream) {
stream->Add("%s", Mnemonic());
if (HasSideEffects()) stream->Add("*");
stream->Add(" ");
@@ -561,69 +537,64 @@ void HInstruction::Verify() {
#endif
-void HCall::PrintDataTo(StringStream* stream) const {
- stream->Add("#%d", argument_count());
-}
-
-
-void HUnaryCall::PrintDataTo(StringStream* stream) const {
+void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
- HCall::PrintDataTo(stream);
+ stream->Add("#%d", argument_count());
}
-void HBinaryCall::PrintDataTo(StringStream* stream) const {
+void HBinaryCall::PrintDataTo(StringStream* stream) {
first()->PrintNameTo(stream);
stream->Add(" ");
second()->PrintNameTo(stream);
stream->Add(" ");
- HCall::PrintDataTo(stream);
+ stream->Add("#%d", argument_count());
}
-void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void HCallConstantFunction::PrintDataTo(StringStream* stream) {
if (IsApplyFunction()) {
stream->Add("optimized apply ");
} else {
stream->Add("%o ", function()->shared()->DebugName());
}
- HCall::PrintDataTo(stream);
+ stream->Add("#%d", argument_count());
}
-void HCallNamed::PrintDataTo(StringStream* stream) const {
+void HCallNamed::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
-void HCallGlobal::PrintDataTo(StringStream* stream) const {
+void HCallGlobal::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
-void HCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("o ", target()->shared()->DebugName());
- HCall::PrintDataTo(stream);
+ stream->Add("#%d", argument_count());
}
-void HCallRuntime::PrintDataTo(StringStream* stream) const {
+void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
- HCall::PrintDataTo(stream);
+ stream->Add("#%d", argument_count());
}
-void HClassOfTest::PrintDataTo(StringStream* stream) const {
+void HClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("class_of_test(");
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
}
-void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintNameTo(stream);
stream->Add("[");
index()->PrintNameTo(stream);
@@ -632,7 +603,7 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
-void HControlInstruction::PrintDataTo(StringStream* stream) const {
+void HControlInstruction::PrintDataTo(StringStream* stream) {
if (FirstSuccessor() != NULL) {
int first_id = FirstSuccessor()->block_id();
if (SecondSuccessor() == NULL) {
@@ -645,13 +616,13 @@ void HControlInstruction::PrintDataTo(StringStream* stream) const {
}
-void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const {
+void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
-void HCompareMap::PrintDataTo(StringStream* stream) const {
+void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
HControlInstruction::PrintDataTo(stream);
@@ -679,19 +650,19 @@ const char* HUnaryMathOperation::OpName() const {
}
-void HUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
const char* name = OpName();
stream->Add("%s ", name);
value()->PrintNameTo(stream);
}
-void HUnaryOperation::PrintDataTo(StringStream* stream) const {
+void HUnaryOperation::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
-void HHasInstanceType::PrintDataTo(StringStream* stream) const {
+void HHasInstanceType::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
case FIRST_JS_OBJECT_TYPE:
@@ -712,14 +683,14 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) const {
}
-void HTypeofIs::PrintDataTo(StringStream* stream) const {
+void HTypeofIs::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->ToAsciiVector());
}
-void HChange::PrintDataTo(StringStream* stream) const {
+void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
@@ -735,26 +706,26 @@ HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
}
-void HCheckMap::PrintDataTo(StringStream* stream) const {
+void HCheckMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" %p", *map());
}
-void HCheckFunction::PrintDataTo(StringStream* stream) const {
+void HCheckFunction::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" %p", *target());
}
-void HCallStub::PrintDataTo(StringStream* stream) const {
+void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
HUnaryCall::PrintDataTo(stream);
}
-void HInstanceOf::PrintDataTo(StringStream* stream) const {
+void HInstanceOf::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
@@ -899,7 +870,7 @@ Range* HMod::InferRange() {
}
-void HPhi::PrintTo(StringStream* stream) const {
+void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
@@ -925,7 +896,7 @@ void HPhi::AddInput(HValue* value) {
}
-HValue* HPhi::GetRedundantReplacement() const {
+HValue* HPhi::GetRedundantReplacement() {
HValue* candidate = NULL;
int count = OperandCount();
int position = 0;
@@ -977,7 +948,7 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
-void HSimulate::PrintDataTo(StringStream* stream) const {
+void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d ", ast_id());
if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
if (values_.length() > 0) {
@@ -994,7 +965,7 @@ void HSimulate::PrintDataTo(StringStream* stream) const {
}
-void HEnterInlined::PrintDataTo(StringStream* stream) const {
+void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id());
}
@@ -1035,7 +1006,7 @@ HConstant* HConstant::CopyToTruncatedInt32() const {
}
-void HConstant::PrintDataTo(StringStream* stream) const {
+void HConstant::PrintDataTo(StringStream* stream) {
handle()->ShortPrint(stream);
}
@@ -1045,7 +1016,7 @@ bool HArrayLiteral::IsCopyOnWrite() const {
}
-void HBinaryOperation::PrintDataTo(StringStream* stream) const {
+void HBinaryOperation::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
@@ -1129,7 +1100,7 @@ Range* HShl::InferRange() {
-void HCompare::PrintDataTo(StringStream* stream) const {
+void HCompare::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
HBinaryOperation::PrintDataTo(stream);
@@ -1148,18 +1119,26 @@ void HCompare::SetInputRepresentation(Representation r) {
}
-void HParameter::PrintDataTo(StringStream* stream) const {
+void HParameter::PrintDataTo(StringStream* stream) {
stream->Add("%u", index());
}
-void HLoadNamedField::PrintDataTo(StringStream* stream) const {
+void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
}
-void HLoadKeyed::PrintDataTo(StringStream* stream) const {
+void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("]");
+}
+
+
+void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1167,7 +1146,7 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const {
}
-void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
+void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1175,7 +1154,7 @@ void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
}
-void HStoreNamed::PrintDataTo(StringStream* stream) const {
+void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
@@ -1185,15 +1164,29 @@ void HStoreNamed::PrintDataTo(StringStream* stream) const {
}
-void HStoreNamedField::PrintDataTo(StringStream* stream) const {
- HStoreNamed::PrintDataTo(stream);
+void HStoreNamedField::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
+ stream->Add(".");
+ ASSERT(name()->IsString());
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" = ");
+ value()->PrintNameTo(stream);
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}
}
-void HStoreKeyed::PrintDataTo(StringStream* stream) const {
+void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] = ");
+ value()->PrintNameTo(stream);
+}
+
+
+void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1202,25 +1195,34 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) const {
}
-void HLoadGlobal::PrintDataTo(StringStream* stream) const {
+void HStorePixelArrayElement::PrintDataTo(StringStream* stream) {
+ external_pointer()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] = ");
+ value()->PrintNameTo(stream);
+}
+
+
+void HLoadGlobal::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (check_hole_value()) stream->Add(" (deleteable/read-only)");
}
-void HStoreGlobal::PrintDataTo(StringStream* stream) const {
+void HStoreGlobal::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
}
-void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
+void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
}
-void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
+void HStoreContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintNameTo(stream);
stream->Add("[%d] = ", slot_index());
value()->PrintNameTo(stream);
@@ -1230,33 +1232,33 @@ void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
// Implementation of type inference and type conversions. Calculates
// the inferred type of this instruction based on the input operands.
-HType HValue::CalculateInferredType() const {
+HType HValue::CalculateInferredType() {
return type_;
}
-HType HCheckMap::CalculateInferredType() const {
+HType HCheckMap::CalculateInferredType() {
return value()->type();
}
-HType HCheckFunction::CalculateInferredType() const {
+HType HCheckFunction::CalculateInferredType() {
return value()->type();
}
-HType HCheckNonSmi::CalculateInferredType() const {
+HType HCheckNonSmi::CalculateInferredType() {
// TODO(kasperl): Is there any way to signal that this isn't a smi?
return HType::Tagged();
}
-HType HCheckSmi::CalculateInferredType() const {
+HType HCheckSmi::CalculateInferredType() {
return HType::Smi();
}
-HType HPhi::CalculateInferredType() const {
+HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
HType current = OperandAt(i)->type();
@@ -1266,77 +1268,77 @@ HType HPhi::CalculateInferredType() const {
}
-HType HConstant::CalculateInferredType() const {
+HType HConstant::CalculateInferredType() {
return constant_type_;
}
-HType HCompare::CalculateInferredType() const {
+HType HCompare::CalculateInferredType() {
return HType::Boolean();
}
-HType HCompareJSObjectEq::CalculateInferredType() const {
+HType HCompareJSObjectEq::CalculateInferredType() {
return HType::Boolean();
}
-HType HUnaryPredicate::CalculateInferredType() const {
+HType HUnaryPredicate::CalculateInferredType() {
return HType::Boolean();
}
-HType HBitwiseBinaryOperation::CalculateInferredType() const {
+HType HBitwiseBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HArithmeticBinaryOperation::CalculateInferredType() const {
+HType HArithmeticBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HAdd::CalculateInferredType() const {
+HType HAdd::CalculateInferredType() {
return HType::Tagged();
}
-HType HBitAnd::CalculateInferredType() const {
+HType HBitAnd::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HBitXor::CalculateInferredType() const {
+HType HBitXor::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HBitOr::CalculateInferredType() const {
+HType HBitOr::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HBitNot::CalculateInferredType() const {
+HType HBitNot::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HUnaryMathOperation::CalculateInferredType() const {
+HType HUnaryMathOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HShl::CalculateInferredType() const {
+HType HShl::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HShr::CalculateInferredType() const {
+HType HShr::CalculateInferredType() {
return HType::TaggedNumber();
}
-HType HSar::CalculateInferredType() const {
+HType HSar::CalculateInferredType() {
return HType::TaggedNumber();
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 9f5170ca2..22916f503 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -51,14 +51,9 @@ class LChunkBuilder;
V(BinaryCall) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
- V(Call) \
V(ControlInstruction) \
V(Instruction) \
- V(LoadKeyed) \
- V(MaterializedLiteral) \
V(Phi) \
- V(StoreKeyed) \
- V(StoreNamed) \
V(UnaryCall) \
V(UnaryControlInstruction) \
V(UnaryOperation) \
@@ -151,6 +146,7 @@ class LChunkBuilder;
V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
+ V(StorePixelArrayElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
@@ -192,14 +188,6 @@ class LChunkBuilder;
DECLARE_INSTRUCTION(type)
-
-template<int kSize>
-class HOperandVector : public EmbeddedVector<HValue*, kSize> {
- public:
- HOperandVector() : EmbeddedVector<HValue*, kSize>(NULL) { }
-};
-
-
class Range: public ZoneObject {
public:
Range() : lower_(kMinInt),
@@ -308,7 +296,7 @@ class Representation {
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
- bool Equals(const Representation& other) const {
+ bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
@@ -543,15 +531,12 @@ class HValue: public ZoneObject {
bool IsDefinedAfter(HBasicBlock* other) const;
// Operands.
- virtual int OperandCount() const { return 0; }
- virtual HValue* OperandAt(int index) const {
- UNREACHABLE();
- return NULL;
- }
+ virtual int OperandCount() = 0;
+ virtual HValue* OperandAt(int index) = 0;
void SetOperandAt(int index, HValue* value);
- int LookupOperandIndex(int occurrence_index, HValue* op) const;
- bool UsesMultipleTimes(HValue* op) const;
+ int LookupOperandIndex(int occurrence_index, HValue* op);
+ bool UsesMultipleTimes(HValue* op);
void ReplaceAndDelete(HValue* other);
void ReplaceValue(HValue* other);
@@ -577,10 +562,9 @@ class HValue: public ZoneObject {
void ComputeInitialRange();
// Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
- }
- virtual Representation InferredRepresentation() const {
+ virtual Representation RequiredInputRepresentation(int index) const = 0;
+
+ virtual Representation InferredRepresentation() {
return representation();
}
@@ -595,11 +579,11 @@ class HValue: public ZoneObject {
HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- bool Equals(HValue* other) const;
- virtual intptr_t Hashcode() const;
+ bool Equals(HValue* other);
+ virtual intptr_t Hashcode();
// Printing support.
- virtual void PrintTo(StringStream* stream) const = 0;
+ virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
static void PrintTypeTo(HType type, StringStream* stream);
@@ -610,7 +594,7 @@ class HValue: public ZoneObject {
// it has changed.
bool UpdateInferredType();
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
#ifdef DEBUG
virtual void Verify() = 0;
@@ -619,14 +603,14 @@ class HValue: public ZoneObject {
protected:
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
UNREACHABLE();
return false;
}
virtual void RepresentationChanged(Representation to) { }
virtual Range* InferRange();
virtual void DeleteFromGraph() = 0;
- virtual void InternalSetOperandAt(int index, HValue* value) { UNREACHABLE(); }
+ virtual void InternalSetOperandAt(int index, HValue* value) = 0;
void clear_block() {
ASSERT(block_ != NULL);
block_ = NULL;
@@ -668,8 +652,8 @@ class HInstruction: public HValue {
HInstruction* next() const { return next_; }
HInstruction* previous() const { return previous_; }
- void PrintTo(StringStream* stream) const;
- virtual void PrintDataTo(StringStream* stream) const {}
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) { }
bool IsLinked() const { return block() != NULL; }
void Unlink();
@@ -690,6 +674,8 @@ class HInstruction: public HValue {
// instruction.
virtual bool IsCheckInstruction() const { return false; }
+ virtual bool IsCall() { return false; }
+
DECLARE_INSTRUCTION(Instruction)
protected:
@@ -716,12 +702,6 @@ class HInstruction: public HValue {
};
-class HBlockEntry: public HInstruction {
- public:
- DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
-};
-
-
class HControlInstruction: public HInstruction {
public:
HControlInstruction(HBasicBlock* first, HBasicBlock* second)
@@ -731,7 +711,7 @@ class HControlInstruction: public HInstruction {
HBasicBlock* FirstSuccessor() const { return first_successor_; }
HBasicBlock* SecondSuccessor() const { return second_successor_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_INSTRUCTION(ControlInstruction)
@@ -741,25 +721,101 @@ class HControlInstruction: public HInstruction {
};
-class HDeoptimize: public HControlInstruction {
+template<int NumElements>
+class HOperandContainer {
+ public:
+ HOperandContainer() : elems_() { }
+
+ int length() { return NumElements; }
+ HValue*& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+
+ private:
+ HValue* elems_[NumElements];
+};
+
+
+template<>
+class HOperandContainer<0> {
+ public:
+ int length() { return 0; }
+ HValue*& operator[](int i) {
+ UNREACHABLE();
+ static HValue* t = 0;
+ return t;
+ }
+};
+
+
+template<int V>
+class HTemplateInstruction : public HInstruction {
+ public:
+ int OperandCount() { return V; }
+ HValue* OperandAt(int i) { return inputs_[i]; }
+
+ protected:
+ void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+
+ private:
+ HOperandContainer<V> inputs_;
+};
+
+
+template<int V>
+class HTemplateControlInstruction : public HControlInstruction {
+ public:
+ HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second)
+ : HControlInstruction(first, second) { }
+ int OperandCount() { return V; }
+ HValue* OperandAt(int i) { return inputs_[i]; }
+
+ protected:
+ void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
+
+ private:
+ HOperandContainer<V> inputs_;
+};
+
+
+class HBlockEntry: public HTemplateInstruction<0> {
+ public:
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+};
+
+
+class HDeoptimize: public HTemplateControlInstruction<0> {
public:
- HDeoptimize() : HControlInstruction(NULL, NULL) { }
+ HDeoptimize() : HTemplateControlInstruction<0>(NULL, NULL) { }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
-class HGoto: public HControlInstruction {
+class HGoto: public HTemplateControlInstruction<0> {
public:
explicit HGoto(HBasicBlock* target)
- : HControlInstruction(target, NULL), include_stack_check_(false) {
- }
+ : HTemplateControlInstruction<0>(target, NULL),
+ include_stack_check_(false) { }
void set_include_stack_check(bool include_stack_check) {
include_stack_check_ = include_stack_check;
}
bool include_stack_check() const { return include_stack_check_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
private:
@@ -767,34 +823,20 @@ class HGoto: public HControlInstruction {
};
-class HUnaryControlInstruction: public HControlInstruction {
+class HUnaryControlInstruction: public HTemplateControlInstruction<1> {
public:
explicit HUnaryControlInstruction(HValue* value,
HBasicBlock* true_target,
HBasicBlock* false_target)
- : HControlInstruction(true_target, false_target) {
+ : HTemplateControlInstruction<1>(true_target, false_target) {
SetOperandAt(0, value);
}
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
+ virtual void PrintDataTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream) const;
-
- HValue* value() const { return OperandAt(0); }
- virtual int OperandCount() const { return 1; }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ HValue* value() { return OperandAt(0); }
DECLARE_INSTRUCTION(UnaryControlInstruction)
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- private:
- HOperandVector<1> operands_;
};
@@ -826,10 +868,14 @@ class HCompareMap: public HUnaryControlInstruction {
ASSERT(!map.is_null());
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<Map> map() const { return map_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
private:
@@ -843,38 +889,36 @@ class HReturn: public HUnaryControlInstruction {
: HUnaryControlInstruction(value, NULL, NULL) {
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class HAbnormalExit: public HControlInstruction {
+class HAbnormalExit: public HTemplateControlInstruction<0> {
public:
- HAbnormalExit() : HControlInstruction(NULL, NULL) { }
+ HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
};
-class HUnaryOperation: public HInstruction {
+class HUnaryOperation: public HTemplateInstruction<1> {
public:
explicit HUnaryOperation(HValue* value) {
SetOperandAt(0, value);
}
- HValue* value() const { return OperandAt(0); }
- virtual void PrintDataTo(StringStream* stream) const;
- virtual int OperandCount() const { return 1; }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ HValue* value() { return OperandAt(0); }
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_INSTRUCTION(UnaryOperation)
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- private:
- HOperandVector<1> operands_;
};
@@ -924,13 +968,13 @@ class HChange: public HUnaryOperation {
return true;
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(Change,
CanTruncateToInt32() ? "truncate" : "change")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
if (!other->IsChange()) return false;
HChange* change = HChange::cast(other);
return value() == change->value()
@@ -954,7 +998,7 @@ class HSimulate: public HInstruction {
assigned_indexes_(2) {}
virtual ~HSimulate() {}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
int ast_id() const { return ast_id_; }
@@ -979,8 +1023,12 @@ class HSimulate: public HInstruction {
void AddPushedValue(HValue* value) {
AddValue(kNoIndex, value);
}
- virtual int OperandCount() const { return values_.length(); }
- virtual HValue* OperandAt(int index) const { return values_[index]; }
+ virtual int OperandCount() { return values_.length(); }
+ virtual HValue* OperandAt(int index) { return values_[index]; }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
@@ -1011,25 +1059,33 @@ class HSimulate: public HInstruction {
};
-class HStackCheck: public HInstruction {
+class HStackCheck: public HTemplateInstruction<0> {
public:
HStackCheck() { }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
};
-class HEnterInlined: public HInstruction {
+class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
: closure_(closure), function_(function) {
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> closure() const { return closure_; }
FunctionLiteral* function() const { return function_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
private:
@@ -1038,39 +1094,49 @@ class HEnterInlined: public HInstruction {
};
-class HLeaveInlined: public HInstruction {
+class HLeaveInlined: public HTemplateInstruction<0> {
public:
HLeaveInlined() {}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
};
class HPushArgument: public HUnaryOperation {
public:
- explicit HPushArgument(HValue* value) : HUnaryOperation(value) { }
+ explicit HPushArgument(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- HValue* argument() const { return OperandAt(0); }
+ HValue* argument() { return OperandAt(0); }
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
};
-class HContext: public HInstruction {
+class HContext: public HTemplateInstruction<0> {
public:
HContext() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Context, "context");
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1083,8 +1149,12 @@ class HOuterContext: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1097,8 +1167,12 @@ class HGlobalObject: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1112,94 +1186,79 @@ class HGlobalReceiver: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HCall: public HInstruction {
+template <int V>
+class HCall: public HTemplateInstruction<V> {
public:
// The argument count includes the receiver.
- explicit HCall(int argument_count) : argument_count_(argument_count) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
+ explicit HCall<V>(int argument_count) : argument_count_(argument_count) {
+ this->set_representation(Representation::Tagged());
+ this->SetAllSideEffects();
}
- virtual HType CalculateInferredType() const { return HType::Tagged(); }
+ virtual HType CalculateInferredType() { return HType::Tagged(); }
virtual int argument_count() const { return argument_count_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- DECLARE_INSTRUCTION(Call)
+ virtual bool IsCall() { return true; }
private:
int argument_count_;
};
-class HUnaryCall: public HCall {
+class HUnaryCall: public HCall<1> {
public:
HUnaryCall(HValue* value, int argument_count)
- : HCall(argument_count), value_(NULL) {
+ : HCall<1>(argument_count) {
SetOperandAt(0, value);
}
- virtual void PrintDataTo(StringStream* stream) const;
-
- HValue* value() const { return value_; }
-
- virtual int OperandCount() const { return 1; }
- virtual HValue* OperandAt(int index) const {
- ASSERT(index == 0);
- return value_;
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
- DECLARE_INSTRUCTION(UnaryCall)
+ virtual void PrintDataTo(StringStream* stream);
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- ASSERT(index == 0);
- value_ = value;
- }
+ HValue* value() { return OperandAt(0); }
- private:
- HValue* value_;
+ DECLARE_INSTRUCTION(UnaryCall)
};
-class HBinaryCall: public HCall {
+class HBinaryCall: public HCall<2> {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
- : HCall(argument_count) {
+ : HCall<2>(argument_count) {
SetOperandAt(0, first);
SetOperandAt(1, second);
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- HValue* first() const { return operands_[0]; }
- HValue* second() const { return operands_[1]; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- virtual int OperandCount() const { return 2; }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ HValue* first() { return OperandAt(0); }
+ HValue* second() { return OperandAt(1); }
DECLARE_INSTRUCTION(BinaryCall)
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- private:
- HOperandVector<2> operands_;
};
-class HCallConstantFunction: public HCall {
+class HCallConstantFunction: public HCall<0> {
public:
HCallConstantFunction(Handle<JSFunction> function, int argument_count)
- : HCall(argument_count), function_(function) { }
+ : HCall<0>(argument_count), function_(function) { }
Handle<JSFunction> function() const { return function_; }
@@ -1207,7 +1266,11 @@ class HCallConstantFunction: public HCall {
return function_->code() == Builtins::builtin(Builtins::FunctionApply);
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
@@ -1226,8 +1289,8 @@ class HCallKeyed: public HBinaryCall {
return Representation::Tagged();
}
- HValue* context() const { return first(); }
- HValue* key() const { return second(); }
+ HValue* context() { return first(); }
+ HValue* key() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
};
@@ -1239,13 +1302,17 @@ class HCallNamed: public HUnaryCall {
: HUnaryCall(context, argument_count), name_(name) {
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- HValue* context() const { return value(); }
+ HValue* context() { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
private:
Handle<String> name_;
};
@@ -1257,7 +1324,11 @@ class HCallFunction: public HUnaryCall {
: HUnaryCall(context, argument_count) {
}
- HValue* context() const { return value(); }
+ HValue* context() { return value(); }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
};
@@ -1269,11 +1340,15 @@ class HCallGlobal: public HUnaryCall {
: HUnaryCall(context, argument_count), name_(name) {
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- HValue* context() const { return value(); }
+ HValue* context() { return value(); }
Handle<String> name() const { return name_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
private:
@@ -1281,15 +1356,19 @@ class HCallGlobal: public HUnaryCall {
};
-class HCallKnownGlobal: public HCall {
+class HCallKnownGlobal: public HCall<0> {
public:
HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
- : HCall(argument_count), target_(target) { }
+ : HCall<0>(argument_count), target_(target) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> target() const { return target_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
private:
@@ -1307,24 +1386,28 @@ class HCallNew: public HBinaryCall {
return Representation::Tagged();
}
- HValue* context() const { return first(); }
- HValue* constructor() const { return second(); }
+ HValue* context() { return first(); }
+ HValue* constructor() { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
};
-class HCallRuntime: public HCall {
+class HCallRuntime: public HCall<0> {
public:
HCallRuntime(Handle<String> name,
Runtime::Function* c_function,
int argument_count)
- : HCall(argument_count), c_function_(c_function), name_(name) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ : HCall<0>(argument_count), c_function_(c_function), name_(name) { }
+ virtual void PrintDataTo(StringStream* stream);
Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
private:
@@ -1351,7 +1434,7 @@ class HJSArrayLength: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1370,7 +1453,7 @@ class HFixedArrayLength: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1391,7 +1474,7 @@ class HPixelArrayLength: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1406,12 +1489,12 @@ class HBitNot: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1442,9 +1525,9 @@ class HUnaryMathOperation: public HUnaryOperation {
SetFlag(kUseGVN);
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -1459,11 +1542,10 @@ class HUnaryMathOperation: public HUnaryOperation {
case kMathSin:
case kMathCos:
return Representation::Double();
- break;
case kMathAbs:
return representation();
- break;
default:
+ UNREACHABLE();
return Representation::None();
}
}
@@ -1484,7 +1566,7 @@ class HUnaryMathOperation: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
@@ -1509,7 +1591,7 @@ class HLoadElements: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1533,7 +1615,7 @@ class HLoadPixelArrayExternalPointer: public HUnaryOperation {
"load-pixel-array-external-pointer")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1551,8 +1633,8 @@ class HCheckMap: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
- virtual HType CalculateInferredType() const;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual HType CalculateInferredType();
#ifdef DEBUG
virtual void Verify();
@@ -1563,7 +1645,7 @@ class HCheckMap: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HCheckMap* b = HCheckMap::cast(other);
return map_.is_identical_to(b->map());
}
@@ -1586,8 +1668,8 @@ class HCheckFunction: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
- virtual HType CalculateInferredType() const;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual HType CalculateInferredType();
#ifdef DEBUG
virtual void Verify();
@@ -1598,7 +1680,7 @@ class HCheckFunction: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HCheckFunction* b = HCheckFunction::cast(other);
return target_.is_identical_to(b->target());
}
@@ -1646,7 +1728,7 @@ class HCheckInstanceType: public HUnaryOperation {
// TODO(ager): It could be nice to allow the ommision of instance
// type checks if we have already performed an instance type check
// with a larger range.
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HCheckInstanceType* b = HCheckInstanceType::cast(other);
return (first_ == b->first()) && (last_ == b->last());
}
@@ -1670,7 +1752,7 @@ class HCheckNonSmi: public HUnaryOperation {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
#ifdef DEBUG
virtual void Verify();
@@ -1679,11 +1761,11 @@ class HCheckNonSmi: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HCheckPrototypeMaps: public HInstruction {
+class HCheckPrototypeMaps: public HTemplateInstruction<0> {
public:
HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
: prototype_(prototype), holder_(holder) {
@@ -1702,7 +1784,11 @@ class HCheckPrototypeMaps: public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
- virtual intptr_t Hashcode() const {
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ virtual intptr_t Hashcode() {
ASSERT(!Heap::IsAllocationAllowed());
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
@@ -1710,7 +1796,7 @@ class HCheckPrototypeMaps: public HInstruction {
}
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
return prototype_.is_identical_to(b->prototype()) &&
holder_.is_identical_to(b->holder());
@@ -1734,7 +1820,7 @@ class HCheckSmi: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
#ifdef DEBUG
virtual void Verify();
@@ -1743,7 +1829,7 @@ class HCheckSmi: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1762,7 +1848,7 @@ class HPhi: public HValue {
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation() const {
+ virtual Representation InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
@@ -1781,10 +1867,10 @@ class HPhi: public HValue {
virtual Representation RequiredInputRepresentation(int index) const {
return representation();
}
- virtual HType CalculateInferredType() const;
- virtual int OperandCount() const { return inputs_.length(); }
- virtual HValue* OperandAt(int index) const { return inputs_[index]; }
- HValue* GetRedundantReplacement() const;
+ virtual HType CalculateInferredType();
+ virtual int OperandCount() { return inputs_.length(); }
+ virtual HValue* OperandAt(int index) { return inputs_[index]; }
+ HValue* GetRedundantReplacement();
void AddInput(HValue* value);
bool IsReceiver() { return merged_index_ == 0; }
@@ -1793,7 +1879,7 @@ class HPhi: public HValue {
virtual const char* Mnemonic() const { return "phi"; }
- virtual void PrintTo(StringStream* stream) const;
+ virtual void PrintTo(StringStream* stream);
#ifdef DEBUG
virtual void Verify();
@@ -1841,18 +1927,22 @@ class HPhi: public HValue {
};
-class HArgumentsObject: public HInstruction {
+class HArgumentsObject: public HTemplateInstruction<0> {
public:
HArgumentsObject() {
set_representation(Representation::Tagged());
SetFlag(kIsArguments);
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
};
-class HConstant: public HInstruction {
+class HConstant: public HTemplateInstruction<0> {
public:
HConstant(Handle<Object> handle, Representation r);
@@ -1860,9 +1950,13 @@ class HConstant: public HInstruction {
bool InOldSpace() const { return !Heap::InNewSpace(*handle_); }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
virtual bool EmitAtUses() const { return !representation().IsDouble(); }
- virtual void PrintDataTo(StringStream* stream) const;
- virtual HType CalculateInferredType() const;
+ virtual void PrintDataTo(StringStream* stream);
+ virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
HConstant* CopyToRepresentation(Representation r) const;
HConstant* CopyToTruncatedInt32() const;
@@ -1878,7 +1972,7 @@ class HConstant: public HInstruction {
}
bool HasStringValue() const { return handle_->IsString(); }
- virtual intptr_t Hashcode() const {
+ virtual intptr_t Hashcode() {
ASSERT(!Heap::allow_allocation(false));
return reinterpret_cast<intptr_t>(*handle());
}
@@ -1892,7 +1986,7 @@ class HConstant: public HInstruction {
protected:
virtual Range* InferRange();
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HConstant* other_constant = HConstant::cast(other);
return handle().is_identical_to(other_constant->handle());
}
@@ -1911,7 +2005,7 @@ class HConstant: public HInstruction {
};
-class HBinaryOperation: public HInstruction {
+class HBinaryOperation: public HTemplateInstruction<2> {
public:
HBinaryOperation(HValue* left, HValue* right) {
ASSERT(left != NULL && right != NULL);
@@ -1919,38 +2013,29 @@ class HBinaryOperation: public HInstruction {
SetOperandAt(1, right);
}
- HValue* left() const { return OperandAt(0); }
- HValue* right() const { return OperandAt(1); }
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
// TODO(kasperl): Move these helpers to the IA-32 Lithium
// instruction sequence builder.
- HValue* LeastConstantOperand() const {
+ HValue* LeastConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return right();
return left();
}
- HValue* MostConstantOperand() const {
+ HValue* MostConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return left();
return right();
}
virtual bool IsCommutative() const { return false; }
- virtual void PrintDataTo(StringStream* stream) const;
- virtual int OperandCount() const { return operands_.length(); }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_INSTRUCTION(BinaryOperation)
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- HOperandVector<2> operands_;
};
-class HApplyArguments: public HInstruction {
+class HApplyArguments: public HTemplateInstruction<4> {
public:
HApplyArguments(HValue* function,
HValue* receiver,
@@ -1971,27 +2056,16 @@ class HApplyArguments: public HInstruction {
: Representation::Tagged();
}
- HValue* function() const { return OperandAt(0); }
- HValue* receiver() const { return OperandAt(1); }
- HValue* length() const { return OperandAt(2); }
- HValue* elements() const { return OperandAt(3); }
-
- virtual int OperandCount() const { return operands_.length(); }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ HValue* function() { return OperandAt(0); }
+ HValue* receiver() { return OperandAt(1); }
+ HValue* length() { return OperandAt(2); }
+ HValue* elements() { return OperandAt(3); }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- private:
- HOperandVector<4> operands_;
};
-class HArgumentsElements: public HInstruction {
+class HArgumentsElements: public HTemplateInstruction<0> {
public:
HArgumentsElements() {
// The value produced by this instruction is a pointer into the stack
@@ -2002,8 +2076,12 @@ class HArgumentsElements: public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2014,14 +2092,18 @@ class HArgumentsLength: public HUnaryOperation {
SetFlag(kUseGVN);
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HAccessArgumentsAt: public HInstruction {
+class HAccessArgumentsAt: public HTemplateInstruction<3> {
public:
HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
set_representation(Representation::Tagged());
@@ -2031,7 +2113,7 @@ class HAccessArgumentsAt: public HInstruction {
SetOperandAt(2, index);
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) const {
// The arguments elements is considered tagged.
@@ -2040,24 +2122,13 @@ class HAccessArgumentsAt: public HInstruction {
: Representation::Integer32();
}
- HValue* arguments() const { return operands_[0]; }
- HValue* length() const { return operands_[1]; }
- HValue* index() const { return operands_[2]; }
-
- virtual int OperandCount() const { return operands_.length(); }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ HValue* arguments() { return OperandAt(0); }
+ HValue* length() { return OperandAt(1); }
+ HValue* index() { return OperandAt(2); }
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- virtual bool DataEquals(HValue* other) const { return true; }
-
- private:
- HOperandVector<3> operands_;
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2078,13 +2149,13 @@ class HBoundsCheck: public HBinaryOperation {
virtual void Verify();
#endif
- HValue* index() const { return left(); }
- HValue* length() const { return right(); }
+ HValue* index() { return left(); }
+ HValue* length() { return right(); }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2110,7 +2181,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
}
}
- HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_INSTRUCTION(BitwiseBinaryOperation)
};
@@ -2132,11 +2203,11 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
}
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
virtual Representation RequiredInputRepresentation(int index) const {
return representation();
}
- virtual Representation InferredRepresentation() const {
+ virtual Representation InferredRepresentation() {
if (left()->representation().Equals(right()->representation())) {
return left()->representation();
}
@@ -2169,18 +2240,18 @@ class HCompare: public HBinaryOperation {
return input_representation_;
}
Token::Value token() const { return token_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
- virtual intptr_t Hashcode() const {
+ virtual intptr_t Hashcode() {
return HValue::Hashcode() * 7 + token_;
}
DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HCompare* comp = HCompare::cast(other);
return token_ == comp->token();
}
@@ -2206,12 +2277,12 @@ class HCompareJSObjectEq: public HBinaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2229,7 +2300,7 @@ class HUnaryPredicate: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
};
@@ -2243,7 +2314,7 @@ class HIsNull: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HIsNull* b = HIsNull::cast(other);
return is_strict_ == b->is_strict();
}
@@ -2260,7 +2331,7 @@ class HIsObject: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2271,11 +2342,11 @@ class HIsSmi: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HIsConstructCall: public HInstruction {
+class HIsConstructCall: public HTemplateInstruction<0> {
public:
HIsConstructCall() {
set_representation(Representation::Tagged());
@@ -2286,10 +2357,14 @@ class HIsConstructCall: public HInstruction {
return !HasSideEffects() && (uses()->length() <= 1);
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2305,12 +2380,12 @@ class HHasInstanceType: public HUnaryPredicate {
InstanceType from() { return from_; }
InstanceType to() { return to_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HHasInstanceType* b = HHasInstanceType::cast(other);
return (from_ == b->from()) && (to_ == b->to());
}
@@ -2328,7 +2403,7 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2339,7 +2414,7 @@ class HGetCachedArrayIndex: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2350,12 +2425,12 @@ class HClassOfTest: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> class_name() const { return class_name_; }
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HClassOfTest* b = HClassOfTest::cast(other);
return class_name_.is_identical_to(b->class_name_);
}
@@ -2371,12 +2446,12 @@ class HTypeofIs: public HUnaryPredicate {
: HUnaryPredicate(value), type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HTypeofIs* b = HTypeofIs::cast(other);
return type_literal_.is_identical_to(b->type_literal_);
}
@@ -2386,7 +2461,7 @@ class HTypeofIs: public HUnaryPredicate {
};
-class HInstanceOf: public HInstruction {
+class HInstanceOf: public HTemplateInstruction<3> {
public:
HInstanceOf(HValue* context, HValue* left, HValue* right) {
SetOperandAt(0, context);
@@ -2396,9 +2471,9 @@ class HInstanceOf: public HInstruction {
SetAllSideEffects();
}
- HValue* context() const { return operands_[0]; }
- HValue* left() const { return operands_[1]; }
- HValue* right() const { return operands_[2]; }
+ HValue* context() { return OperandAt(0); }
+ HValue* left() { return OperandAt(1); }
+ HValue* right() { return OperandAt(2); }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
@@ -2408,20 +2483,9 @@ class HInstanceOf: public HInstruction {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
-
- virtual int OperandCount() const { return 3; }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
-
- private:
- HOperandVector<3> operands_;
};
@@ -2462,7 +2526,7 @@ class HPower: public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2480,12 +2544,12 @@ class HAdd: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Add, "add")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2502,7 +2566,7 @@ class HSub: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2524,7 +2588,7 @@ class HMul: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2541,7 +2605,7 @@ class HMod: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2559,7 +2623,7 @@ class HDiv: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Div, "div")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2571,12 +2635,12 @@ class HBitAnd: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(left, right) { }
virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2588,12 +2652,12 @@ class HBitXor: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(left, right) { }
virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2603,12 +2667,12 @@ class HBitOr: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(left, right) { }
virtual bool IsCommutative() const { return true; }
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
};
@@ -2620,12 +2684,12 @@ class HShl: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2634,12 +2698,12 @@ class HShr: public HBitwiseBinaryOperation {
HShr(HValue* left, HValue* right)
: HBitwiseBinaryOperation(left, right) { }
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2649,16 +2713,16 @@ class HSar: public HBitwiseBinaryOperation {
: HBitwiseBinaryOperation(left, right) { }
virtual Range* InferRange();
- virtual HType CalculateInferredType() const;
+ virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HOsrEntry: public HInstruction {
+class HOsrEntry: public HTemplateInstruction<0> {
public:
explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
SetFlag(kChangesOsrEntries);
@@ -2666,6 +2730,10 @@ class HOsrEntry: public HInstruction {
int ast_id() const { return ast_id_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
private:
@@ -2673,7 +2741,7 @@ class HOsrEntry: public HInstruction {
};
-class HParameter: public HInstruction {
+class HParameter: public HTemplateInstruction<0> {
public:
explicit HParameter(unsigned index) : index_(index) {
set_representation(Representation::Tagged());
@@ -2681,7 +2749,11 @@ class HParameter: public HInstruction {
unsigned index() const { return index_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
@@ -2700,7 +2772,7 @@ class HCallStub: public HUnaryCall {
CodeStub::Major major_key() { return major_key_; }
- HValue* context() const { return value(); }
+ HValue* context() { return value(); }
void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
transcendental_type_ = transcendental_type;
@@ -2709,7 +2781,11 @@ class HCallStub: public HUnaryCall {
return transcendental_type_;
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
@@ -2719,15 +2795,19 @@ class HCallStub: public HUnaryCall {
};
-class HUnknownOSRValue: public HInstruction {
+class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
};
-class HLoadGlobal: public HInstruction {
+class HLoadGlobal: public HTemplateInstruction<0> {
public:
HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
: cell_(cell), check_hole_value_(check_hole_value) {
@@ -2739,20 +2819,21 @@ class HLoadGlobal: public HInstruction {
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
bool check_hole_value() const { return check_hole_value_; }
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- virtual intptr_t Hashcode() const {
+ virtual intptr_t Hashcode() {
ASSERT(!Heap::allow_allocation(false));
return reinterpret_cast<intptr_t>(*cell_);
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HLoadGlobal* b = HLoadGlobal::cast(other);
return cell_.is_identical_to(b->cell());
}
@@ -2780,7 +2861,7 @@ class HStoreGlobal: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
@@ -2805,12 +2886,12 @@ class HLoadContextSlot: public HUnaryOperation {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (slot_index() == b->slot_index());
}
@@ -2833,11 +2914,11 @@ class HStoreContextSlot: public HBinaryOperation {
SetFlag(kChangesContextSlots);
}
- HValue* context() const { return OperandAt(0); }
- HValue* value() const { return OperandAt(1); }
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
int slot_index() const { return slot_index_; }
- bool NeedsWriteBarrier() const {
+ bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
@@ -2845,7 +2926,7 @@ class HStoreContextSlot: public HBinaryOperation {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
@@ -2869,19 +2950,19 @@ class HLoadNamedField: public HUnaryOperation {
}
}
- HValue* object() const { return OperandAt(0); }
+ HValue* object() { return OperandAt(0); }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
protected:
- virtual bool DataEquals(HValue* other) const {
+ virtual bool DataEquals(HValue* other) {
HLoadNamedField* b = HLoadNamedField::cast(other);
return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
}
@@ -2900,8 +2981,8 @@ class HLoadNamedGeneric: public HBinaryOperation {
SetAllSideEffects();
}
- HValue* context() const { return OperandAt(0); }
- HValue* object() const { return OperandAt(1); }
+ HValue* context() { return OperandAt(0); }
+ HValue* object() { return OperandAt(1); }
Handle<Object> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
@@ -2924,7 +3005,7 @@ class HLoadFunctionPrototype: public HUnaryOperation {
SetFlag(kDependsOnCalls);
}
- HValue* function() const { return OperandAt(0); }
+ HValue* function() { return OperandAt(0); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@@ -2933,46 +3014,34 @@ class HLoadFunctionPrototype: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HLoadKeyed: public HBinaryOperation {
+class HLoadKeyedFastElement: public HBinaryOperation {
public:
- HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
+ HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
set_representation(Representation::Tagged());
- }
-
- virtual void PrintDataTo(StringStream* stream) const;
-
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
- }
- HValue* object() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
-
- DECLARE_INSTRUCTION(LoadKeyed)
-};
-
-
-class HLoadKeyedFastElement: public HLoadKeyed {
- public:
- HLoadKeyedFastElement(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
SetFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
}
+ HValue* object() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
return (index == 1) ? Representation::Integer32()
: Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream);
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
"load_keyed_fast_element")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -2987,7 +3056,7 @@ class HLoadPixelArrayElement: public HBinaryOperation {
SetFlag(kUseGVN);
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32, but the base pointer
@@ -2996,76 +3065,50 @@ class HLoadPixelArrayElement: public HBinaryOperation {
: Representation::External();
}
- HValue* external_pointer() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
+ HValue* external_pointer() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load_pixel_array_element")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
};
-class HLoadKeyedGeneric: public HLoadKeyed {
+class HLoadKeyedGeneric: public HTemplateInstruction<3> {
public:
- HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key)
- : HLoadKeyed(obj, key), context_(NULL) {
+ HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) {
+ set_representation(Representation::Tagged());
+ SetOperandAt(0, obj);
+ SetOperandAt(1, key);
SetOperandAt(2, context);
SetAllSideEffects();
}
- HValue* context() const { return context_; }
- HValue* object() const { return operands_[0]; }
- HValue* key() const { return operands_[1]; }
-
- virtual int OperandCount() const { return 3; }
- virtual HValue* OperandAt(int index) const {
- return (index < 2) ? operands_[index] : context_;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value);
-
- private:
- HValue* context_;
-};
-
+ HValue* object() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* context() { return OperandAt(2); }
-class HStoreNamed: public HBinaryOperation {
- public:
- HStoreNamed(HValue* obj, Handle<String> name, HValue* val)
- : HBinaryOperation(obj, val), name_(name) {
- }
+ virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
-
- HValue* object() const { return OperandAt(0); }
- Handle<String> name() const { return name_; }
- HValue* value() const { return OperandAt(1); }
- void set_value(HValue* value) { SetOperandAt(1, value); }
-
- DECLARE_INSTRUCTION(StoreNamed)
-
- private:
- Handle<String> name_;
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
};
-class HStoreNamedField: public HStoreNamed {
+class HStoreNamedField: public HBinaryOperation {
public:
HStoreNamedField(HValue* obj,
Handle<String> name,
HValue* val,
bool in_object,
int offset)
- : HStoreNamed(obj, name, val),
+ : HBinaryOperation(obj, val),
+ name_(name),
is_in_object_(in_object),
offset_(offset) {
if (is_in_object_) {
@@ -3080,137 +3123,143 @@ class HStoreNamedField: public HStoreNamed {
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
+
+ HValue* object() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+ Handle<String> name() const { return name_; }
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
- bool NeedsWriteBarrier() const {
+ bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
private:
+ Handle<String> name_;
bool is_in_object_;
int offset_;
Handle<Map> transition_;
};
-class HStoreNamedGeneric: public HStoreNamed {
+class HStoreNamedGeneric: public HTemplateInstruction<3> {
public:
HStoreNamedGeneric(HValue* context,
HValue* object,
Handle<String> name,
HValue* value)
- : HStoreNamed(object, name, value), context_(NULL) {
+ : name_(name) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, value);
SetOperandAt(2, context);
SetAllSideEffects();
}
- HValue* context() const { return context_; }
- HValue* object() const { return operands_[0]; }
- HValue* value() const { return operands_[1]; }
+ HValue* object() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+ HValue* context() { return OperandAt(2); }
+ Handle<String> name() { return name_; }
- virtual int OperandCount() const { return 3; }
+ virtual void PrintDataTo(StringStream* stream);
- virtual HValue* OperandAt(int index) const {
- return (index < 2) ? operands_[index] : context_;
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value);
-
private:
- HValue* context_;
+ Handle<String> name_;
};
-class HStoreKeyed: public HInstruction {
+class HStoreKeyedFastElement: public HTemplateInstruction<3> {
public:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val) {
+ HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ SetFlag(kChangesArrayElements);
}
- virtual void PrintDataTo(StringStream* stream) const;
- virtual int OperandCount() const { return operands_.length(); }
- virtual HValue* OperandAt(int index) const { return operands_[index]; }
-
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
+ // The key is supposed to be Integer32.
+ return (index == 1) ? Representation::Integer32()
+ : Representation::Tagged();
}
- HValue* object() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
- HValue* value() const { return OperandAt(2); }
+ HValue* object() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
- bool NeedsWriteBarrier() const {
+ bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
- DECLARE_INSTRUCTION(StoreKeyed)
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- operands_[index] = value;
- }
+ virtual void PrintDataTo(StringStream* stream);
- HOperandVector<3> operands_;
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store_keyed_fast_element")
};
-class HStoreKeyedFastElement: public HStoreKeyed {
+class HStorePixelArrayElement: public HTemplateInstruction<3> {
public:
- HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val)
- : HStoreKeyed(obj, key, val) {
- SetFlag(kChangesArrayElements);
+ HStorePixelArrayElement(HValue* external_elements, HValue* key, HValue* val) {
+ SetFlag(kChangesPixelArrayElements);
+ SetOperandAt(0, external_elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, val);
}
+ virtual void PrintDataTo(StringStream* stream);
+
virtual Representation RequiredInputRepresentation(int index) const {
- // The key is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
- : Representation::Tagged();
+ if (index == 0) {
+ return Representation::External();
+ } else {
+ return Representation::Integer32();
+ }
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store_keyed_fast_element")
+ HValue* external_pointer() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
+ "store_pixel_array_element")
};
-class HStoreKeyedGeneric: public HStoreKeyed {
+class HStoreKeyedGeneric: public HTemplateInstruction<4> {
public:
HStoreKeyedGeneric(HValue* context,
HValue* object,
HValue* key,
- HValue* value)
- : HStoreKeyed(object, key, value), context_(NULL) {
+ HValue* value) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, key);
+ SetOperandAt(2, value);
SetOperandAt(3, context);
SetAllSideEffects();
}
- HValue* context() const { return context_; }
- HValue* object() const { return operands_[0]; }
- HValue* key() const { return operands_[1]; }
- HValue* value() const { return operands_[2]; }
+ HValue* object() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+ HValue* context() { return OperandAt(3); }
- virtual int OperandCount() const { return 4; }
-
- virtual HValue* OperandAt(int index) const {
- return (index < 3) ? operands_[index] : context_;
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
-
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value);
+ virtual void PrintDataTo(StringStream* stream);
- private:
- HValue* context_;
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
};
@@ -3228,13 +3277,13 @@ class HStringCharCodeAt: public HBinaryOperation {
: Representation::Tagged();
}
- HValue* string() const { return OperandAt(0); }
- HValue* index() const { return OperandAt(1); }
+ HValue* string() { return OperandAt(0); }
+ HValue* index() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxUC16CharCode);
@@ -3253,7 +3302,7 @@ class HStringLength: public HUnaryOperation {
return Representation::Tagged();
}
- virtual HType CalculateInferredType() const {
+ virtual HType CalculateInferredType() {
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
return HType::Smi();
}
@@ -3261,7 +3310,7 @@ class HStringLength: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
protected:
- virtual bool DataEquals(HValue* other) const { return true; }
+ virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxLength);
@@ -3269,31 +3318,30 @@ class HStringLength: public HUnaryOperation {
};
-class HMaterializedLiteral: public HInstruction {
+template <int V>
+class HMaterializedLiteral: public HTemplateInstruction<V> {
public:
- HMaterializedLiteral(int index, int depth)
+ HMaterializedLiteral<V>(int index, int depth)
: literal_index_(index), depth_(depth) {
- set_representation(Representation::Tagged());
+ this->set_representation(Representation::Tagged());
}
int literal_index() const { return literal_index_; }
int depth() const { return depth_; }
- DECLARE_INSTRUCTION(MaterializedLiteral)
-
private:
int literal_index_;
int depth_;
};
-class HArrayLiteral: public HMaterializedLiteral {
+class HArrayLiteral: public HMaterializedLiteral<0> {
public:
HArrayLiteral(Handle<FixedArray> constant_elements,
int length,
int literal_index,
int depth)
- : HMaterializedLiteral(literal_index, depth),
+ : HMaterializedLiteral<0>(literal_index, depth),
length_(length),
constant_elements_(constant_elements) {}
@@ -3302,6 +3350,10 @@ class HArrayLiteral: public HMaterializedLiteral {
bool IsCopyOnWrite() const;
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
private:
@@ -3310,55 +3362,53 @@ class HArrayLiteral: public HMaterializedLiteral {
};
-class HObjectLiteral: public HMaterializedLiteral {
+class HObjectLiteral: public HMaterializedLiteral<1> {
public:
HObjectLiteral(HValue* context,
Handle<FixedArray> constant_properties,
bool fast_elements,
int literal_index,
int depth)
- : HMaterializedLiteral(literal_index, depth),
- context_(NULL),
+ : HMaterializedLiteral<1>(literal_index, depth),
constant_properties_(constant_properties),
fast_elements_(fast_elements) {
SetOperandAt(0, context);
}
- HValue* context() const { return context_; }
+ HValue* context() { return OperandAt(0); }
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
bool fast_elements() const { return fast_elements_; }
- virtual int OperandCount() const { return 1; }
- virtual HValue* OperandAt(int index) const { return context_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
- protected:
- virtual void InternalSetOperandAt(int index, HValue* value) {
- context_ = value;
- }
-
private:
- HValue* context_;
Handle<FixedArray> constant_properties_;
bool fast_elements_;
};
-class HRegExpLiteral: public HMaterializedLiteral {
+class HRegExpLiteral: public HMaterializedLiteral<0> {
public:
HRegExpLiteral(Handle<String> pattern,
Handle<String> flags,
int literal_index)
- : HMaterializedLiteral(literal_index, 0),
+ : HMaterializedLiteral<0>(literal_index, 0),
pattern_(pattern),
flags_(flags) { }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
private:
@@ -3367,13 +3417,17 @@ class HRegExpLiteral: public HMaterializedLiteral {
};
-class HFunctionLiteral: public HInstruction {
+class HFunctionLiteral: public HTemplateInstruction<0> {
public:
HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
: shared_info_(shared), pretenure_(pretenure) {
set_representation(Representation::Tagged());
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -3405,6 +3459,10 @@ class HValueOf: public HUnaryOperation {
set_representation(Representation::Tagged());
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
};
@@ -3423,8 +3481,8 @@ class HDeleteProperty: public HBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
- HValue* object() const { return left(); }
- HValue* key() const { return right(); }
+ HValue* object() { return left(); }
+ HValue* key() { return right(); }
};
#undef DECLARE_INSTRUCTION
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 3ebd580ad..9e40a50c7 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -482,84 +482,30 @@ HConstant* HGraph::GetConstantFalse() {
}
-void HSubgraph::AppendOptional(HSubgraph* graph,
- bool on_true_branch,
- HValue* value) {
- ASSERT(HasExit() && graph->HasExit());
- HBasicBlock* other_block = graph_->CreateBasicBlock();
- HBasicBlock* join_block = graph_->CreateBasicBlock();
-
- HTest* test = on_true_branch
- ? new HTest(value, graph->entry_block(), other_block)
- : new HTest(value, other_block, graph->entry_block());
- exit_block_->Finish(test);
- other_block->Goto(join_block);
- graph->exit_block()->Goto(join_block);
- exit_block_ = join_block;
-}
-
-
-void HSubgraph::AppendJoin(HSubgraph* then_graph,
- HSubgraph* else_graph,
- AstNode* node) {
- if (then_graph->HasExit() && else_graph->HasExit()) {
- // We need to merge, create new merge block.
+void HSubgraph::AppendJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ int join_id) {
+ if (first == NULL) {
+ exit_block_ = second;
+ } else if (second == NULL) {
+ exit_block_ = first;
+ } else {
HBasicBlock* join_block = graph_->CreateBasicBlock();
- then_graph->exit_block()->Goto(join_block);
- else_graph->exit_block()->Goto(join_block);
- join_block->SetJoinId(node->id());
+ first->Goto(join_block);
+ second->Goto(join_block);
+ join_block->SetJoinId(join_id);
exit_block_ = join_block;
- } else if (then_graph->HasExit()) {
- exit_block_ = then_graph->exit_block_;
- } else if (else_graph->HasExit()) {
- exit_block_ = else_graph->exit_block_;
- } else {
- exit_block_ = NULL;
}
}
-void HSubgraph::ResolveContinue(IterationStatement* statement) {
- HBasicBlock* continue_block = BundleContinue(statement);
+void HSubgraph::ResolveContinue(IterationStatement* statement,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
- exit_block_ = JoinBlocks(exit_block(),
- continue_block,
- statement->ContinueId());
- }
-}
-
-
-HBasicBlock* HSubgraph::BundleBreak(BreakableStatement* statement) {
- return BundleBreakContinue(statement, false, statement->ExitId());
-}
-
-
-HBasicBlock* HSubgraph::BundleContinue(IterationStatement* statement) {
- return BundleBreakContinue(statement, true, statement->ContinueId());
-}
-
-
-HBasicBlock* HSubgraph::BundleBreakContinue(BreakableStatement* statement,
- bool is_continue,
- int join_id) {
- HBasicBlock* result = NULL;
- const ZoneList<BreakContinueInfo*>* infos = break_continue_info();
- for (int i = 0; i < infos->length(); ++i) {
- BreakContinueInfo* info = infos->at(i);
- if (info->is_continue() == is_continue &&
- info->target() == statement &&
- !info->IsResolved()) {
- if (result == NULL) {
- result = graph_->CreateBasicBlock();
- }
- info->block()->Goto(result);
- info->Resolve();
- }
+ continue_block->SetJoinId(statement->ContinueId());
}
-
- if (result != NULL) result->SetJoinId(join_id);
-
- return result;
+ exit_block_ =
+ JoinBlocks(exit_block(), continue_block, statement->ContinueId());
}
@@ -574,83 +520,93 @@ HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
}
-void HSubgraph::AppendEndless(HSubgraph* body, IterationStatement* statement) {
- ConnectExitTo(body->entry_block());
- body->ResolveContinue(statement);
- body->ConnectExitTo(body->entry_block(), true);
- exit_block_ = body->BundleBreak(statement);
- body->entry_block()->PostProcessLoopHeader(statement);
+void HSubgraph::AppendEndless(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block) {
+ if (exit_block() != NULL) {
+ exit_block()->Goto(body_entry, false);
+ }
+ if (body_exit != NULL) {
+ body_exit->Goto(body_entry, true);
+ }
+ if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
+ exit_block_ = break_block;
+ body_entry->PostProcessLoopHeader(statement);
}
-void HSubgraph::AppendDoWhile(HSubgraph* body,
- IterationStatement* statement,
- HSubgraph* go_back,
- HSubgraph* exit) {
- ConnectExitTo(body->entry_block());
- go_back->ConnectExitTo(body->entry_block(), true);
-
- HBasicBlock* break_block = body->BundleBreak(statement);
+void HSubgraph::AppendDoWhile(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* go_back,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block) {
+ if (this->exit_block() != NULL) {
+ this->exit_block()->Goto(body_entry, false);
+ }
+ if (go_back != NULL) {
+ go_back->Goto(body_entry, true);
+ }
+ if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
exit_block_ =
- JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
- body->entry_block()->PostProcessLoopHeader(statement);
+ JoinBlocks(exit_block, break_block, statement->ExitId());
+ body_entry->PostProcessLoopHeader(statement);
}
-void HSubgraph::AppendWhile(HSubgraph* condition,
- HSubgraph* body,
- IterationStatement* statement,
- HSubgraph* continue_subgraph,
- HSubgraph* exit) {
- ConnectExitTo(condition->entry_block());
+void HSubgraph::AppendWhile(IterationStatement* statement,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block,
+ HBasicBlock* loop_entry,
+ HBasicBlock* loop_exit) {
+ if (this->exit_block() != NULL) {
+ this->exit_block()->Goto(condition_entry, false);
+ }
- HBasicBlock* break_block = body->BundleBreak(statement);
+ if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
exit_block_ =
- JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
-
- if (continue_subgraph != NULL) {
- body->ConnectExitTo(continue_subgraph->entry_block(), true);
- continue_subgraph->entry_block()->SetJoinId(statement->EntryId());
- exit_block_ = JoinBlocks(exit_block_,
- continue_subgraph->exit_block(),
- statement->ExitId());
+ JoinBlocks(exit_block, break_block, statement->ExitId());
+
+ if (loop_entry != NULL) {
+ if (body_exit != NULL) {
+ body_exit->Goto(loop_entry, true);
+ }
+ loop_entry->SetJoinId(statement->EntryId());
+ exit_block_ = JoinBlocks(exit_block_, loop_exit, statement->ExitId());
} else {
- body->ConnectExitTo(condition->entry_block(), true);
+ if (body_exit != NULL) {
+ body_exit->Goto(condition_entry, true);
+ }
}
- condition->entry_block()->PostProcessLoopHeader(statement);
+ condition_entry->PostProcessLoopHeader(statement);
}
-void HSubgraph::Append(HSubgraph* next, BreakableStatement* stmt) {
- exit_block_->Goto(next->entry_block());
- exit_block_ = next->exit_block_;
+void HSubgraph::Append(BreakableStatement* stmt,
+ HBasicBlock* entry_block,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block) {
+ exit_block_->Goto(entry_block);
+ exit_block_ = exit_block;
if (stmt != NULL) {
- next->entry_block()->SetJoinId(stmt->EntryId());
- HBasicBlock* break_block = next->BundleBreak(stmt);
- exit_block_ = JoinBlocks(exit_block(), break_block, stmt->ExitId());
+ entry_block->SetJoinId(stmt->EntryId());
+ if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
+ exit_block_ = JoinBlocks(exit_block, break_block, stmt->ExitId());
}
}
void HSubgraph::FinishExit(HControlInstruction* instruction) {
- ASSERT(HasExit());
+ ASSERT(exit_block() != NULL);
exit_block_->Finish(instruction);
exit_block_->ClearEnvironment();
exit_block_ = NULL;
}
-void HSubgraph::FinishBreakContinue(BreakableStatement* target,
- bool is_continue) {
- ASSERT(!exit_block_->IsFinished());
- BreakContinueInfo* info = new BreakContinueInfo(target, exit_block_,
- is_continue);
- break_continue_info_.Add(info);
- exit_block_ = NULL;
-}
-
-
HGraph::HGraph(CompilationInfo* info)
: HSubgraph(this),
next_block_id_(0),
@@ -1992,14 +1948,14 @@ AstContext::~AstContext() {
EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
- !owner()->subgraph()->HasExit() ||
+ owner()->current_block() == NULL ||
owner()->environment()->length() == original_length_);
}
ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
- !owner()->subgraph()->HasExit() ||
+ owner()->current_block() == NULL ||
owner()->environment()->length() == original_length_ + 1);
}
@@ -2057,7 +2013,7 @@ void TestContext::BuildBranch(HValue* value) {
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
HTest* test = new HTest(value, empty_true, empty_false);
- builder->CurrentBlock()->Finish(test);
+ builder->current_block()->Finish(test);
HValue* const no_return_value = NULL;
HBasicBlock* true_target = if_true();
@@ -2073,7 +2029,7 @@ void TestContext::BuildBranch(HValue* value) {
} else {
empty_false->Goto(false_target);
}
- builder->subgraph()->set_exit_block(NULL);
+ builder->set_current_block(NULL);
}
@@ -2130,7 +2086,6 @@ class HGraphBuilder::SubgraphScope BASE_EMBEDDED {
}
~SubgraphScope() {
- old_subgraph_->AddBreakContinueInfo(subgraph_);
builder_->current_subgraph_ = old_subgraph_;
}
@@ -2173,14 +2128,22 @@ void HGraphBuilder::VisitForControl(Expression* expr,
void HGraphBuilder::VisitArgument(Expression* expr) {
- VisitForValue(expr);
+ VISIT_FOR_VALUE(expr);
+ Push(AddInstruction(new HPushArgument(Pop())));
}
void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
VisitArgument(arguments->at(i));
- if (HasStackOverflow() || !current_subgraph_->HasExit()) return;
+ if (HasStackOverflow() || current_block() == NULL) return;
+ }
+}
+
+
+void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+ for (int i = 0; i < exprs->length(); ++i) {
+ VISIT_FOR_VALUE(exprs->at(i));
}
}
@@ -2204,10 +2167,13 @@ HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
HSubgraph* body = CreateGotoSubgraph(environment());
AddToSubgraph(body, stmts);
if (HasStackOverflow()) return NULL;
- current_subgraph_->Append(body, NULL);
+ current_subgraph_->Append(NULL,
+ body->entry_block(),
+ body->exit_block(),
+ NULL);
body->entry_block()->SetJoinId(info->function()->id());
- if (graph_->HasExit()) {
+ if (graph()->exit_block() != NULL) {
graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
}
}
@@ -2268,21 +2234,21 @@ void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_subgraph_->HasExit());
- current_subgraph_->exit_block()->AddInstruction(instr);
+ ASSERT(current_block() != NULL);
+ current_block()->AddInstruction(instr);
return instr;
}
void HGraphBuilder::AddSimulate(int id) {
- ASSERT(current_subgraph_->HasExit());
- current_subgraph_->exit_block()->AddSimulate(id);
+ ASSERT(current_block() != NULL);
+ current_block()->AddSimulate(id);
}
void HGraphBuilder::AddPhi(HPhi* instr) {
- ASSERT(current_subgraph_->HasExit());
- current_subgraph_->exit_block()->AddPhi(instr);
+ ASSERT(current_block() != NULL);
+ current_block()->AddPhi(instr);
}
@@ -2292,7 +2258,8 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) {
}
-void HGraphBuilder::PreProcessCall(HCall* call) {
+template <int V>
+HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count);
for (int i = 0; i < count; ++i) {
@@ -2302,6 +2269,7 @@ void HGraphBuilder::PreProcessCall(HCall* call) {
while (!arguments.is_empty()) {
AddInstruction(new HPushArgument(arguments.RemoveLast()));
}
+ return call;
}
@@ -2309,9 +2277,6 @@ void HGraphBuilder::SetupScope(Scope* scope) {
// We don't yet handle the function name for named function expressions.
if (scope->function() != NULL) BAILOUT("named function expression");
- // We can't handle heap-allocated locals.
- if (scope->num_heap_slots() > 0) BAILOUT("heap allocated locals");
-
HConstant* undefined_constant =
new HConstant(Factory::undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
@@ -2333,6 +2298,10 @@ void HGraphBuilder::SetupScope(Scope* scope) {
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
+ if (!scope->arguments()->IsStackAllocated() ||
+ !scope->arguments_shadow()->IsStackAllocated()) {
+ BAILOUT("context-allocated arguments");
+ }
HArgumentsObject* object = new HArgumentsObject;
AddInstruction(object);
graph()->SetArgumentsObject(object);
@@ -2345,7 +2314,7 @@ void HGraphBuilder::SetupScope(Scope* scope) {
void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
- if (HasStackOverflow() || !current_subgraph_->HasExit()) break;
+ if (HasStackOverflow() || current_block() == NULL) break;
}
}
@@ -2406,8 +2375,14 @@ HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
void HGraphBuilder::VisitBlock(Block* stmt) {
if (stmt->labels() != NULL) {
HSubgraph* block_graph = CreateGotoSubgraph(environment());
- ADD_TO_SUBGRAPH(block_graph, stmt->statements());
- current_subgraph_->Append(block_graph, stmt);
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ ADD_TO_SUBGRAPH(block_graph, stmt->statements());
+ }
+ subgraph()->Append(stmt,
+ block_graph->entry_block(),
+ block_graph->exit_block(),
+ break_info.break_block());
} else {
VisitStatements(stmt->statements());
}
@@ -2443,18 +2418,55 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
else_graph->entry_block()->SetJoinId(stmt->ElseId());
ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
- current_subgraph_->AppendJoin(then_graph, else_graph, stmt);
+ current_subgraph_->AppendJoin(then_graph->exit_block(),
+ else_graph->exit_block(),
+ stmt->id());
+ }
+}
+
+
+HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+ BreakableStatement* stmt,
+ BreakType type) {
+ BreakAndContinueScope* current = this;
+ while (current != NULL && current->info()->target() != stmt) {
+ current = current->next();
+ }
+ ASSERT(current != NULL); // Always found (unless stack is malformed).
+ HBasicBlock* block = NULL;
+ switch (type) {
+ case BREAK:
+ block = current->info()->break_block();
+ if (block == NULL) {
+ block = current->owner()->graph()->CreateBasicBlock();
+ current->info()->set_break_block(block);
+ }
+ break;
+
+ case CONTINUE:
+ block = current->info()->continue_block();
+ if (block == NULL) {
+ block = current->owner()->graph()->CreateBasicBlock();
+ current->info()->set_continue_block(block);
+ }
+ break;
}
+
+ return block;
}
void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- current_subgraph_->FinishBreakContinue(stmt->target(), true);
+ HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
+ current_block()->Goto(continue_block);
+ set_current_block(NULL);
}
void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- current_subgraph_->FinishBreakContinue(stmt->target(), false);
+ HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
+ current_block()->Goto(break_block);
+ set_current_block(NULL);
}
@@ -2483,9 +2495,9 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
VISIT_FOR_VALUE(stmt->expression());
return_value = environment()->Pop();
}
- subgraph()->exit_block()->AddLeaveInlined(return_value,
+ current_block()->AddLeaveInlined(return_value,
function_return_);
- subgraph()->set_exit_block(NULL);
+ set_current_block(NULL);
}
}
}
@@ -2506,7 +2518,7 @@ HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph,
CaseClause* clause) {
AddToSubgraph(subgraph, clause->label());
if (HasStackOverflow()) return NULL;
- HValue* clause_value = subgraph->environment()->Pop();
+ HValue* clause_value = subgraph->exit_block()->last_environment()->Pop();
HCompare* compare = new HCompare(switch_value,
clause_value,
Token::EQ_STRICT);
@@ -2587,7 +2599,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// last_false_block is the (empty) false-block of the last comparison. If
// there are no comparisons at all (a single default clause), it is just
// the last block of the current subgraph.
- HBasicBlock* last_false_block = current_subgraph_->exit_block();
+ HBasicBlock* last_false_block = current_block();
if (prev_graph != current_subgraph_) {
last_false_block = graph()->CreateBasicBlock();
HBasicBlock* empty = graph()->CreateBasicBlock();
@@ -2630,17 +2642,20 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Check for fall-through from previous statement block.
- if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+ if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) {
if (subgraph == NULL) subgraph = CreateEmptySubgraph();
previous_subgraph->exit_block()->
Finish(new HGoto(subgraph->entry_block()));
}
if (subgraph != NULL) {
- ADD_TO_SUBGRAPH(subgraph, clause->statements());
- HBasicBlock* break_block = subgraph->BundleBreak(stmt);
- if (break_block != NULL) {
- break_block->Finish(new HGoto(single_exit_block));
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ ADD_TO_SUBGRAPH(subgraph, clause->statements());
+ }
+ if (break_info.break_block() != NULL) {
+ break_info.break_block()->SetJoinId(stmt->ExitId());
+ break_info.break_block()->Finish(new HGoto(single_exit_block));
}
}
@@ -2649,7 +2664,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// If the last statement block has a fall-through, connect it to the
// single exit block.
- if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+ if (previous_subgraph != NULL && previous_subgraph->exit_block() != NULL) {
previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block));
}
@@ -2659,9 +2674,9 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
if (single_exit_block->HasPredecessor()) {
- current_subgraph_->set_exit_block(single_exit_block);
+ set_current_block(single_exit_block);
} else {
- current_subgraph_->set_exit_block(NULL);
+ set_current_block(NULL);
}
}
@@ -2703,15 +2718,21 @@ void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- ASSERT(subgraph()->HasExit());
+ ASSERT(current_block() != NULL);
subgraph()->PreProcessOsrEntry(stmt);
HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
- body_graph->ResolveContinue(stmt);
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ ADD_TO_SUBGRAPH(body_graph, stmt->body());
+ }
+ body_graph->ResolveContinue(stmt, break_info.continue_block());
- if (!body_graph->HasExit() || stmt->cond()->ToBooleanIsTrue()) {
- current_subgraph_->AppendEndless(body_graph, stmt);
+ if (body_graph->exit_block() == NULL || stmt->cond()->ToBooleanIsTrue()) {
+ subgraph()->AppendEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block());
} else {
HSubgraph* go_back = CreateEmptySubgraph();
HSubgraph* exit = CreateEmptySubgraph();
@@ -2723,18 +2744,17 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
exit->entry_block()->SetJoinId(stmt->ExitId());
}
- current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit);
+ subgraph()->AppendDoWhile(stmt,
+ body_graph->entry_block(),
+ go_back->exit_block(),
+ exit->exit_block(),
+ break_info.break_block());
}
}
-bool HGraphBuilder::ShouldPeel(HSubgraph* cond, HSubgraph* body) {
- return FLAG_use_peeling;
-}
-
-
void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- ASSERT(subgraph()->HasExit());
+ ASSERT(current_block() != NULL);
subgraph()->PreProcessOsrEntry(stmt);
HSubgraph* cond_graph = NULL;
@@ -2744,7 +2764,6 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
// If the condition is constant true, do not generate a condition subgraph.
if (stmt->cond()->ToBooleanIsTrue()) {
body_graph = CreateLoopHeaderSubgraph(environment());
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
} else {
cond_graph = CreateLoopHeaderSubgraph(environment());
body_graph = CreateEmptySubgraph();
@@ -2757,36 +2776,54 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
body_graph->entry_block()->SetJoinId(stmt->BodyId());
exit_graph->entry_block()->SetJoinId(stmt->ExitId());
}
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
}
- body_graph->ResolveContinue(stmt);
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ ADD_TO_SUBGRAPH(body_graph, stmt->body());
+ }
+ body_graph->ResolveContinue(stmt, break_info.continue_block());
if (cond_graph != NULL) {
- AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+ AppendPeeledWhile(stmt,
+ cond_graph->entry_block(),
+ exit_graph->exit_block(),
+ body_graph->exit_block(),
+ break_info.break_block());
} else {
// TODO(fschneider): Implement peeling for endless loops as well.
- current_subgraph_->AppendEndless(body_graph, stmt);
+ subgraph()->AppendEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block());
}
}
void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
- HSubgraph* cond_graph,
- HSubgraph* body_graph,
- HSubgraph* exit_graph) {
- HSubgraph* loop = NULL;
- if (body_graph->HasExit() && stmt != peeled_statement_ &&
- ShouldPeel(cond_graph, body_graph)) {
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block) {
+ HBasicBlock* loop_entry = NULL;
+ HBasicBlock* loop_exit = NULL;
+ if (FLAG_use_peeling && body_exit != NULL && stmt != peeled_statement_) {
// Save the last peeled iteration statement to prevent infinite recursion.
IterationStatement* outer_peeled_statement = peeled_statement_;
peeled_statement_ = stmt;
- loop = CreateGotoSubgraph(body_graph->environment());
+ HSubgraph* loop = CreateGotoSubgraph(body_exit->last_environment());
ADD_TO_SUBGRAPH(loop, stmt);
peeled_statement_ = outer_peeled_statement;
+ loop_entry = loop->entry_block();
+ loop_exit = loop->exit_block();
}
- current_subgraph_->AppendWhile(cond_graph, body_graph, stmt, loop,
- exit_graph);
+ subgraph()->AppendWhile(stmt,
+ condition_entry,
+ exit_block,
+ body_exit,
+ break_block,
+ loop_entry,
+ loop_exit);
}
@@ -2796,7 +2833,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
Visit(stmt->init());
CHECK_BAILOUT;
}
- ASSERT(subgraph()->HasExit());
+ ASSERT(current_block() != NULL);
subgraph()->PreProcessOsrEntry(stmt);
HSubgraph* cond_graph = NULL;
@@ -2817,22 +2854,36 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
body_graph = CreateLoopHeaderSubgraph(environment());
}
- ADD_TO_SUBGRAPH(body_graph, stmt->body());
+ BreakAndContinueInfo break_info(stmt);
+ { BreakAndContinueScope push(&break_info, this);
+ ADD_TO_SUBGRAPH(body_graph, stmt->body());
+ }
HSubgraph* next_graph = NULL;
- body_graph->ResolveContinue(stmt);
+ body_graph->ResolveContinue(stmt, break_info.continue_block());
- if (stmt->next() != NULL && body_graph->HasExit()) {
- next_graph = CreateGotoSubgraph(body_graph->environment());
+ if (stmt->next() != NULL && body_graph->exit_block() != NULL) {
+ next_graph =
+ CreateGotoSubgraph(body_graph->exit_block()->last_environment());
ADD_TO_SUBGRAPH(next_graph, stmt->next());
- body_graph->Append(next_graph, NULL);
+ body_graph->Append(NULL,
+ next_graph->entry_block(),
+ next_graph->exit_block(),
+ NULL);
next_graph->entry_block()->SetJoinId(stmt->ContinueId());
}
if (cond_graph != NULL) {
- AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+ AppendPeeledWhile(stmt,
+ cond_graph->entry_block(),
+ exit_graph->exit_block(),
+ body_graph->exit_block(),
+ break_info.break_block());
} else {
- current_subgraph_->AppendEndless(body_graph, stmt);
+ subgraph()->AppendEndless(stmt,
+ body_graph->entry_block(),
+ body_graph->exit_block(),
+ break_info.break_block());
}
}
@@ -2886,7 +2937,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
else_graph->entry_block()->SetJoinId(expr->ElseId());
ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
- current_subgraph_->AppendJoin(then_graph, else_graph, expr);
+ current_subgraph_->AppendJoin(then_graph->exit_block(),
+ else_graph->exit_block(),
+ expr->id());
ast_context()->ReturnValue(Pop());
}
@@ -3099,9 +3152,9 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
maps->at(i),
if_true->entry_block(),
if_false->entry_block());
- subgraph()->exit_block()->Finish(compare);
+ current_block()->Finish(compare);
- if (if_true->HasExit()) {
+ if (if_true->exit_block() != NULL) {
// In an effect context the value of the type switch is not needed.
// There is no need to merge it at the join block only to discard it.
if (ast_context()->IsEffect()) {
@@ -3110,15 +3163,15 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
if_true->exit_block()->Goto(join_block);
}
- subgraph()->set_exit_block(if_false->exit_block());
+ set_current_block(if_false->exit_block());
}
// Connect the default if necessary.
- if (subgraph()->HasExit()) {
+ if (current_block() != NULL) {
if (ast_context()->IsEffect()) {
environment()->Drop(1);
}
- subgraph()->exit_block()->Goto(join_block);
+ current_block()->Goto(join_block);
}
if (join_block->predecessors()->is_empty()) return NULL;
@@ -3275,10 +3328,10 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
HBasicBlock* new_exit_block =
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
- subgraph()->set_exit_block(new_exit_block);
+ set_current_block(new_exit_block);
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
- if (subgraph()->HasExit() && !ast_context()->IsEffect()) {
+ if (current_block() != NULL && !ast_context()->IsEffect()) {
ast_context()->ReturnValue(Pop());
}
}
@@ -3326,12 +3379,20 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
HValue* key = Pop();
HValue* object = Pop();
- bool is_fast_elements = expr->IsMonomorphic() &&
- expr->GetMonomorphicReceiverType()->has_fast_elements();
-
- instr = is_fast_elements
- ? BuildStoreKeyedFastElement(object, key, value, expr)
- : BuildStoreKeyedGeneric(object, key, value);
+ if (expr->IsMonomorphic()) {
+ Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
+ // An object has either fast elements or pixel array elements, but never
+ // both. Pixel array maps that are assigned to pixel array elements are
+ // always created with the fast elements flag cleared.
+ if (receiver_type->has_pixel_array_elements()) {
+ instr = BuildStoreKeyedPixelArrayElement(object, key, value, expr);
+ } else if (receiver_type->has_fast_elements()) {
+ instr = BuildStoreKeyedFastElement(object, key, value, expr);
+ }
+ }
+ if (instr == NULL) {
+ instr = BuildStoreKeyedGeneric(object, key, value);
+ }
}
Push(value);
@@ -3601,10 +3662,10 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HBasicBlock* new_exit_block =
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
- subgraph()->set_exit_block(new_exit_block);
+ set_current_block(new_exit_block);
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
- if (subgraph()->HasExit() && !ast_context()->IsEffect()) {
+ if (current_block() != NULL && !ast_context()->IsEffect()) {
ast_context()->ReturnValue(Pop());
}
}
@@ -3711,7 +3772,8 @@ HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object,
AddInstruction(new HCheckMap(object, map));
HLoadElements* elements = new HLoadElements(object);
AddInstruction(elements);
- HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
+ HInstruction* length = new HPixelArrayLength(elements);
+ AddInstruction(length);
AddInstruction(new HBoundsCheck(key, length));
HLoadPixelArrayExternalPointer* external_elements =
new HLoadPixelArrayExternalPointer(elements);
@@ -3754,6 +3816,28 @@ HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
}
+HInstruction* HGraphBuilder::BuildStoreKeyedPixelArrayElement(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr) {
+ ASSERT(expr->IsMonomorphic());
+ AddInstruction(new HCheckNonSmi(object));
+ Handle<Map> map = expr->GetMonomorphicReceiverType();
+ ASSERT(!map->has_fast_elements());
+ ASSERT(map->has_pixel_array_elements());
+ AddInstruction(new HCheckMap(object, map));
+ HLoadElements* elements = new HLoadElements(object);
+ AddInstruction(elements);
+ HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
+ AddInstruction(new HBoundsCheck(key, length));
+ HLoadPixelArrayExternalPointer* external_elements =
+ new HLoadPixelArrayExternalPointer(elements);
+ AddInstruction(external_elements);
+ return new HStorePixelArrayElement(external_elements, key, val);
+}
+
+
bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
@@ -3899,7 +3983,8 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// Check for bailout, as trying to inline might fail due to bailout
// during hydrogen processing.
CHECK_BAILOUT;
- HCall* call = new HCallConstantFunction(expr->target(), argument_count);
+ HCallConstantFunction* call =
+ new HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
PreProcessCall(call);
PushAndAdd(call);
@@ -3916,7 +4001,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (maps.length() == 0) {
HContext* context = new HContext;
AddInstruction(context);
- HCall* call = new HCallNamed(context, name, argument_count);
+ HCallNamed* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
@@ -3929,7 +4014,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
} else {
HContext* context = new HContext;
AddInstruction(context);
- HCall* call = new HCallNamed(context, name, argument_count);
+ HCallNamed* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
PreProcessCall(call);
PushAndAdd(call);
@@ -3938,7 +4023,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HBasicBlock* new_exit_block =
BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
- subgraph()->set_exit_block(new_exit_block);
+ set_current_block(new_exit_block);
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
if (new_exit_block != NULL && !ast_context()->IsEffect()) {
@@ -4009,6 +4094,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
}
return false;
}
+ if (inner_info.scope()->num_heap_slots() > 0) return false;
FunctionLiteral* function = inner_info.function();
// Count the number of AST nodes added by inlining this call.
@@ -4048,10 +4134,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
if (!FullCodeGenerator::MakeCode(&inner_info)) return false;
shared->EnableDeoptimizationSupport(*inner_info.code());
Compiler::RecordFunctionCompilation(
- Logger::FUNCTION_TAG,
- Handle<String>(shared->DebugName()),
- shared->start_position(),
- &inner_info);
+ Logger::FUNCTION_TAG, &inner_info, shared);
}
// Save the pending call context and type feedback oracle. Set up new ones
@@ -4107,7 +4190,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
if (FLAG_trace_inlining) TraceInline(target, true);
- if (body->HasExit()) {
+ if (body->exit_block() != NULL) {
// Add a return of undefined if control can fall off the body. In a
// test context, undefined is false.
HValue* return_value = graph()->GetConstantUndefined();
@@ -4136,7 +4219,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
AddSimulate(expr->ReturnId());
// Jump to the function entry (without re-recording the environment).
- subgraph()->exit_block()->Finish(new HGoto(body->entry_block()));
+ current_block()->Finish(new HGoto(body->entry_block()));
// Fix up the function exits.
if (test_context != NULL) {
@@ -4166,11 +4249,11 @@ bool HGraphBuilder::TryInline(Call* expr) {
// TODO(kmillikin): Come up with a better way to handle this. It is too
// subtle. NULL here indicates that the enclosing context has no control
// flow to handle.
- subgraph()->set_exit_block(NULL);
+ set_current_block(NULL);
} else {
function_return_->SetJoinId(expr->id());
- subgraph()->set_exit_block(function_return_);
+ set_current_block(function_return_);
}
call_context_ = saved_call_context;
@@ -4332,14 +4415,13 @@ static bool HasCustomCallGenerator(Handle<JSFunction> function) {
void HGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
- HCall* call = NULL;
+ HInstruction* call = NULL;
Property* prop = callee->AsProperty();
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) {
// Keyed function call.
- VisitArgument(prop->obj());
- CHECK_BAILOUT;
+ VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
// Push receiver and key like the non-optimized code generator expects it.
@@ -4348,14 +4430,13 @@ void HGraphBuilder::VisitCall(Call* expr) {
Push(key);
Push(receiver);
- VisitArgumentList(expr->arguments());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- call = new HCallKeyed(context, key, argument_count);
+ call = PreProcessCall(new HCallKeyed(context, key, argument_count));
call->set_position(expr->position());
- PreProcessCall(call);
Drop(1); // Key.
ast_context()->ReturnInstruction(call, expr->id());
return;
@@ -4367,9 +4448,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (TryCallApply(expr)) return;
CHECK_BAILOUT;
- VisitArgument(prop->obj());
- CHECK_BAILOUT;
- VisitArgumentList(expr->arguments());
+ VISIT_FOR_VALUE(prop->obj());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
@@ -4396,12 +4476,12 @@ void HGraphBuilder::VisitCall(Call* expr) {
// IC when a primitive receiver check is required.
HContext* context = new HContext;
AddInstruction(context);
- call = new HCallNamed(context, name, argument_count);
+ call = PreProcessCall(new HCallNamed(context, name, argument_count));
} else {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
if (TryInline(expr)) {
- if (subgraph()->HasExit()) {
+ if (current_block() != NULL) {
HValue* return_value = Pop();
// If we inlined a function in a test context then we need to emit
// a simulate here to shadow the ones at the end of the
@@ -4416,7 +4496,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
// Check for bailout, as the TryInline call in the if condition above
// might return false due to bailout during hydrogen processing.
CHECK_BAILOUT;
- call = new HCallConstantFunction(expr->target(), argument_count);
+ call = PreProcessCall(new HCallConstantFunction(expr->target(),
+ argument_count));
}
}
} else if (types != NULL && types->length() > 1) {
@@ -4427,7 +4508,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
} else {
HContext* context = new HContext;
AddInstruction(context);
- call = new HCallNamed(context, name, argument_count);
+ call = PreProcessCall(new HCallNamed(context, name, argument_count));
}
} else {
@@ -4436,8 +4517,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (!global_call) {
++argument_count;
- VisitArgument(expr->expression());
- CHECK_BAILOUT;
+ VISIT_FOR_VALUE(expr->expression());
}
if (global_call) {
@@ -4456,7 +4536,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
HGlobalObject* global_object = new HGlobalObject(context);
AddInstruction(context);
PushAndAdd(global_object);
- VisitArgumentList(expr->arguments());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
VISIT_FOR_VALUE(expr->expression());
@@ -4473,7 +4553,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
environment()->SetExpressionStackAt(receiver_index, global_receiver);
if (TryInline(expr)) {
- if (subgraph()->HasExit()) {
+ if (current_block() != NULL) {
HValue* return_value = Pop();
// If we inlined a function in a test context then we need to
// emit a simulate here to shadow the ones at the end of the
@@ -4489,15 +4569,18 @@ void HGraphBuilder::VisitCall(Call* expr) {
// during hydrogen processing.
CHECK_BAILOUT;
- call = new HCallKnownGlobal(expr->target(), argument_count);
+ call = PreProcessCall(new HCallKnownGlobal(expr->target(),
+ argument_count));
} else {
HContext* context = new HContext;
AddInstruction(context);
PushAndAdd(new HGlobalObject(context));
- VisitArgumentList(expr->arguments());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
- call = new HCallGlobal(context, var->name(), argument_count);
+ call = PreProcessCall(new HCallGlobal(context,
+ var->name(),
+ argument_count));
}
} else {
@@ -4506,15 +4589,14 @@ void HGraphBuilder::VisitCall(Call* expr) {
AddInstruction(context);
AddInstruction(global_object);
PushAndAdd(new HGlobalReceiver(global_object));
- VisitArgumentList(expr->arguments());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
- call = new HCallFunction(context, argument_count);
+ call = PreProcessCall(new HCallFunction(context, argument_count));
}
}
call->set_position(expr->position());
- PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4522,9 +4604,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
void HGraphBuilder::VisitCallNew(CallNew* expr) {
// The constructor function is also used as the receiver argument to the
// JS construct call builtin.
- VisitArgument(expr->expression());
- CHECK_BAILOUT;
- VisitArgumentList(expr->arguments());
+ VISIT_FOR_VALUE(expr->expression());
+ VisitExpressions(expr->arguments());
CHECK_BAILOUT;
HContext* context = new HContext;
@@ -4534,7 +4615,7 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
- HCall* call = new HCallNew(context, constructor, arg_count);
+ HCallNew* call = new HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
@@ -4557,25 +4638,15 @@ const HGraphBuilder::InlineFunctionGenerator
void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->IsEqualTo(CStrVector("_Log"))) {
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
- return;
- }
-
- Runtime::Function* function = expr->function();
if (expr->is_jsruntime()) {
BAILOUT("call to a JavaScript runtime function");
}
- ASSERT(function != NULL);
- VisitArgumentList(expr->arguments());
- CHECK_BAILOUT;
-
- int argument_count = expr->arguments()->length();
+ Runtime::Function* function = expr->function();
+ ASSERT(function != NULL);
if (function->intrinsic_type == Runtime::INLINE) {
- ASSERT(name->length() > 0);
- ASSERT(name->Get(0) == '_');
+ ASSERT(expr->name()->length() > 0);
+ ASSERT(expr->name()->Get(0) == '_');
// Call to an inline function.
int lookup_index = static_cast<int>(function->function_id) -
static_cast<int>(Runtime::kFirstInlineFunction);
@@ -4585,12 +4656,17 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
// Call the inline code generator using the pointer-to-member.
- (this->*generator)(argument_count, expr->id());
+ (this->*generator)(expr);
} else {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
- HCall* call = new HCallRuntime(name, expr->function(), argument_count);
+ VisitArgumentList(expr->arguments());
+ CHECK_BAILOUT;
+
+ Handle<String> name = expr->name();
+ int argument_count = expr->arguments()->length();
+ HCallRuntime* call = new HCallRuntime(name, function, argument_count);
call->set_position(RelocInfo::kNoPosition);
- PreProcessCall(call);
+ Drop(argument_count);
ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4640,21 +4716,29 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
VisitForControl(expr->expression(),
context->if_false(),
context->if_true());
- } else {
+ } else if (ast_context()->IsValue()) {
HSubgraph* true_graph = CreateEmptySubgraph();
HSubgraph* false_graph = CreateEmptySubgraph();
VISIT_FOR_CONTROL(expr->expression(),
false_graph->entry_block(),
true_graph->entry_block());
true_graph->entry_block()->SetJoinId(expr->expression()->id());
- true_graph->environment()->Push(graph_->GetConstantTrue());
+ true_graph->exit_block()->last_environment()->Push(
+ graph_->GetConstantTrue());
false_graph->entry_block()->SetJoinId(expr->expression()->id());
- false_graph->environment()->Push(graph_->GetConstantFalse());
+ false_graph->exit_block()->last_environment()->Push(
+ graph_->GetConstantFalse());
- current_subgraph_->AppendJoin(true_graph, false_graph, expr);
+ current_subgraph_->AppendJoin(true_graph->exit_block(),
+ false_graph->exit_block(),
+ expr->id());
ast_context()->ReturnValue(Pop());
+ } else {
+ ASSERT(ast_context()->IsEffect());
+ VISIT_FOR_EFFECT(expr->expression());
}
+
} else if (op == Token::BIT_NOT || op == Token::SUB) {
VISIT_FOR_VALUE(expr->expression());
HValue* value = Pop();
@@ -4943,12 +5027,12 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
// Translate right subexpression by visiting it in the same AST
// context as the entire expression.
- subgraph()->set_exit_block(eval_right);
+ set_current_block(eval_right);
Visit(expr->right());
- } else {
+ } else if (ast_context()->IsValue()) {
VISIT_FOR_VALUE(expr->left());
- ASSERT(current_subgraph_->HasExit());
+ ASSERT(current_block() != NULL);
HValue* left = Top();
HEnvironment* environment_copy = environment()->Copy();
@@ -4956,9 +5040,51 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
HSubgraph* right_subgraph;
right_subgraph = CreateBranchSubgraph(environment_copy);
ADD_TO_SUBGRAPH(right_subgraph, expr->right());
- current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left);
- current_subgraph_->exit_block()->SetJoinId(expr->id());
+
+ ASSERT(current_block() != NULL &&
+ right_subgraph->exit_block() != NULL);
+ // We need an extra block to maintain edge-split form.
+ HBasicBlock* empty_block = graph()->CreateBasicBlock();
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
+
+ HTest* test = is_logical_and
+ ? new HTest(left, right_subgraph->entry_block(), empty_block)
+ : new HTest(left, empty_block, right_subgraph->entry_block());
+ current_block()->Finish(test);
+ empty_block->Goto(join_block);
+ right_subgraph->exit_block()->Goto(join_block);
+ join_block->SetJoinId(expr->id());
+ set_current_block(join_block);
ast_context()->ReturnValue(Pop());
+ } else {
+ ASSERT(ast_context()->IsEffect());
+ // In an effect context, we don't need the value of the left
+ // subexpression, only its control flow and side effects. We need an
+ // extra block to maintain edge-split form.
+ HBasicBlock* empty_block = graph()->CreateBasicBlock();
+ HBasicBlock* right_block = graph()->CreateBasicBlock();
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
+ if (is_logical_and) {
+ VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
+ } else {
+ VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
+ }
+ // TODO(kmillikin): Find a way to fix this. It's ugly that there are
+ // actually two empty blocks (one here and one inserted by
+ // TestContext::BuildBranch, and that they both have an HSimulate
+ // though the second one is not a merge node, and that we really have
+ // no good AST ID to put on that first HSimulate.
+ empty_block->SetJoinId(expr->id());
+ right_block->SetJoinId(expr->RightId());
+ set_current_block(right_block);
+ VISIT_FOR_EFFECT(expr->right());
+
+ empty_block->Goto(join_block);
+ current_block()->Goto(join_block);
+ join_block->SetJoinId(expr->id());
+ set_current_block(join_block);
+ // We did not materialize any value in the predecessor environments,
+ // so there is no need to handle it here.
}
} else {
@@ -5135,343 +5261,361 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HIsSmi* result = new HIsSmi(value);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateIsSpecObject(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HHasInstanceType* result =
new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateIsFunction(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count,
- int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateIsArray(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateIsRegExp(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateIsObject(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
-
+void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HIsObject* test = new HIsObject(value);
- ast_context()->ReturnInstruction(test, ast_id);
+ ast_context()->ReturnInstruction(test, call->id());
}
-void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count,
- int ast_id) {
+void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
BAILOUT("inlined runtime function: IsNonNegativeSmi");
}
-void HGraphBuilder::GenerateIsUndetectableObject(int argument_count,
- int ast_id) {
+void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
BAILOUT("inlined runtime function: IsUndetectableObject");
}
void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
- int argument_count,
- int ast_id) {
+ CallRuntime* call) {
BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
}
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
- ASSERT(argument_count == 0);
- ast_context()->ReturnInstruction(new HIsConstructCall, ast_id);
+void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 0);
+ ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
}
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(int argument_count, int ast_id) {
- ASSERT(argument_count == 0);
+void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 0);
HInstruction* elements = AddInstruction(new HArgumentsElements);
HArgumentsLength* result = new HArgumentsLength(elements);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateArguments(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* index = Pop();
HInstruction* elements = AddInstruction(new HArgumentsElements);
HInstruction* length = AddInstruction(new HArgumentsLength(elements));
HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is detected before we get here
// and does not cause a bailout.
BAILOUT("inlined runtime function: ClassOf");
}
-void HGraphBuilder::GenerateValueOf(int argument_count, int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HValueOf* result = new HValueOf(value);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
BAILOUT("inlined runtime function: SetValueOf");
}
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
- ASSERT(argument_count == 2);
+void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 2);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
+ VISIT_FOR_VALUE(call->arguments()->at(1));
HValue* index = Pop();
HValue* string = Pop();
HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(int argument_count,
- int ast_id) {
+void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
BAILOUT("inlined runtime function: StringCharFromCode");
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
- ASSERT_EQ(2, argument_count);
+void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::StringCharAt, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::StringCharAt, 2);
+ Drop(2);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(int argument_count, int ast_id) {
- ASSERT(argument_count == 2);
+void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 2);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
+ VISIT_FOR_VALUE(call->arguments()->at(1));
HValue* right = Pop();
HValue* left = Pop();
HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateLog(int argument_count, int ast_id) {
- UNREACHABLE(); // We caught this in VisitCallRuntime.
+void HGraphBuilder::GenerateLog(CallRuntime* call) {
+ // %_Log is ignored in optimized code.
+ ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
BAILOUT("inlined runtime function: RandomHeapNumber");
}
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
- ASSERT_EQ(2, argument_count);
+void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::StringAdd, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2);
+ Drop(2);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
- ASSERT_EQ(3, argument_count);
+void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+ ASSERT_EQ(3, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::SubString, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::SubString, 3);
+ Drop(3);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
- ASSERT_EQ(2, argument_count);
+void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::StringCompare, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2);
+ Drop(2);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
- ASSERT_EQ(4, argument_count);
+void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+ ASSERT_EQ(4, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::RegExpExec, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4);
+ Drop(4);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
- int ast_id) {
- ASSERT_EQ(3, argument_count);
+void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+ ASSERT_EQ(3, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
- new HCallStub(context, CodeStub::RegExpConstructResult, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ new HCallStub(context, CodeStub::RegExpConstructResult, 3);
+ Drop(3);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
BAILOUT("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
- ASSERT_EQ(1, argument_count);
+void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::NumberToString, argument_count);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1);
+ Drop(1);
+ ast_context()->ReturnInstruction(result, call->id());
}
// Fast swapping of elements. Takes three expressions, the object and two
// indices. This should only be used if the indices are known to be
// non-negative and within bounds of the elements array at the call site.
-void HGraphBuilder::GenerateSwapElements(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
BAILOUT("inlined runtime function: SwapElements");
}
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
BAILOUT("inlined runtime function: CallFunction");
}
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) {
- ASSERT_EQ(2, argument_count);
+void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+ ASSERT_EQ(2, call->arguments()->length());
+ VISIT_FOR_VALUE(call->arguments()->at(0));
+ VISIT_FOR_VALUE(call->arguments()->at(1));
HValue* right = Pop();
HValue* left = Pop();
HPower* result = new HPower(left, right);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
- ASSERT_EQ(1, argument_count);
+void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
+ HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ Drop(1);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
- ASSERT_EQ(1, argument_count);
+void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
+ HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ Drop(1);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
- ASSERT_EQ(1, argument_count);
+void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+ ASSERT_EQ(1, call->arguments()->length());
+ VisitArgumentList(call->arguments());
+ CHECK_BAILOUT;
HContext* context = new HContext;
AddInstruction(context);
- HCallStub* result =
- new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
+ HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
- PreProcessCall(result);
- ast_context()->ReturnInstruction(result, ast_id);
+ Drop(1);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathSqrt(int argument_count, int ast_id) {
+void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
BAILOUT("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count,
- int ast_id) {
+void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
BAILOUT("inlined runtime function: IsRegExpEquivalent");
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
- int ast_id) {
- ASSERT(argument_count == 1);
+void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ VISIT_FOR_VALUE(call->arguments()->at(0));
HValue* value = Pop();
HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, ast_id);
+ ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count,
- int ast_id) {
+void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
BAILOUT("inlined runtime function: FastAsciiArrayJoin");
}
@@ -5872,7 +6016,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
trace_.Add(" %d %d", parent_index, hint_index);
UseInterval* cur_interval = range->first_interval();
- while (cur_interval != NULL) {
+ while (cur_interval != NULL && range->Covers(cur_interval->start())) {
trace_.Add(" [%d, %d[",
cur_interval->start().Value(),
cur_interval->end().Value());
@@ -5881,7 +6025,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
UsePosition* current_pos = range->first_pos();
while (current_pos != NULL) {
- if (current_pos->RegisterIsBeneficial()) {
+ if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
trace_.Add(" %d M", current_pos->pos().Value());
}
current_pos = current_pos->next();
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index c911b6c1f..e8c0b0630 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -196,94 +196,54 @@ class HSubgraph: public ZoneObject {
explicit HSubgraph(HGraph* graph)
: graph_(graph),
entry_block_(NULL),
- exit_block_(NULL),
- break_continue_info_(4) {
+ exit_block_(NULL) {
}
HGraph* graph() const { return graph_; }
- HEnvironment* environment() const {
- ASSERT(HasExit());
- return exit_block_->last_environment();
+ HBasicBlock* entry_block() const { return entry_block_; }
+ HBasicBlock* exit_block() const { return exit_block_; }
+ void set_exit_block(HBasicBlock* block) {
+ exit_block_ = block;
}
- bool HasExit() const { return exit_block_ != NULL; }
-
void PreProcessOsrEntry(IterationStatement* statement);
- void AppendOptional(HSubgraph* graph,
- bool on_true_branch,
- HValue* boolean_value);
- void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node);
- void AppendWhile(HSubgraph* condition,
- HSubgraph* body,
- IterationStatement* statement,
- HSubgraph* continue_subgraph,
- HSubgraph* exit);
- void AppendDoWhile(HSubgraph* body,
- IterationStatement* statement,
- HSubgraph* go_back,
- HSubgraph* exit);
- void AppendEndless(HSubgraph* body, IterationStatement* statement);
- void Append(HSubgraph* next, BreakableStatement* statement);
- void ResolveContinue(IterationStatement* statement);
- HBasicBlock* BundleBreak(BreakableStatement* statement);
- HBasicBlock* BundleContinue(IterationStatement* statement);
- HBasicBlock* BundleBreakContinue(BreakableStatement* statement,
- bool is_continue,
- int join_id);
+ void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id);
+ void AppendWhile(IterationStatement* statement,
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block,
+ HBasicBlock* loop_entry,
+ HBasicBlock* loop_exit);
+ void AppendDoWhile(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* go_back,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block);
+ void AppendEndless(IterationStatement* statement,
+ HBasicBlock* body_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block);
+ void Append(BreakableStatement* stmt,
+ HBasicBlock* entry_block,
+ HBasicBlock* exit_block,
+ HBasicBlock* break_block);
+ void ResolveContinue(IterationStatement* statement,
+ HBasicBlock* continue_block);
HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
void FinishExit(HControlInstruction* instruction);
- void FinishBreakContinue(BreakableStatement* target, bool is_continue);
void Initialize(HBasicBlock* block) {
ASSERT(entry_block_ == NULL);
entry_block_ = block;
exit_block_ = block;
}
- HBasicBlock* entry_block() const { return entry_block_; }
- HBasicBlock* exit_block() const { return exit_block_; }
- void set_exit_block(HBasicBlock* block) {
- exit_block_ = block;
- }
-
- void ConnectExitTo(HBasicBlock* other, bool include_stack_check = false) {
- if (HasExit()) {
- exit_block()->Goto(other, include_stack_check);
- }
- }
-
- void AddBreakContinueInfo(HSubgraph* other) {
- break_continue_info_.AddAll(other->break_continue_info_);
- }
protected:
- class BreakContinueInfo: public ZoneObject {
- public:
- BreakContinueInfo(BreakableStatement* target, HBasicBlock* block,
- bool is_continue)
- : target_(target), block_(block), continue_(is_continue) {}
- BreakableStatement* target() const { return target_; }
- HBasicBlock* block() const { return block_; }
- bool is_continue() const { return continue_; }
- bool IsResolved() const { return block_ == NULL; }
- void Resolve() { block_ = NULL; }
-
- private:
- BreakableStatement* target_;
- HBasicBlock* block_;
- bool continue_;
- };
-
- const ZoneList<BreakContinueInfo*>* break_continue_info() const {
- return &break_continue_info_;
- }
-
HGraph* graph_; // The graph this is a subgraph of.
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
-
- private:
- ZoneList<BreakContinueInfo*> break_continue_info_;
};
@@ -621,6 +581,53 @@ class TestContext: public AstContext {
class HGraphBuilder: public AstVisitor {
public:
+ enum BreakType { BREAK, CONTINUE };
+
+ // A class encapsulating (lazily-allocated) break and continue blocks for
+ // a breakable statement. Separated from BreakAndContinueScope so that it
+ // can have a separate lifetime.
+ class BreakAndContinueInfo BASE_EMBEDDED {
+ public:
+ explicit BreakAndContinueInfo(BreakableStatement* target)
+ : target_(target), break_block_(NULL), continue_block_(NULL) {
+ }
+
+ BreakableStatement* target() { return target_; }
+ HBasicBlock* break_block() { return break_block_; }
+ void set_break_block(HBasicBlock* block) { break_block_ = block; }
+ HBasicBlock* continue_block() { return continue_block_; }
+ void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
+
+ private:
+ BreakableStatement* target_;
+ HBasicBlock* break_block_;
+ HBasicBlock* continue_block_;
+ };
+
+ // A helper class to maintain a stack of current BreakAndContinueInfo
+ // structures mirroring BreakableStatement nesting.
+ class BreakAndContinueScope BASE_EMBEDDED {
+ public:
+ BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+ : info_(info), owner_(owner), next_(owner->break_scope()) {
+ owner->set_break_scope(this);
+ }
+
+ ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
+
+ BreakAndContinueInfo* info() { return info_; }
+ HGraphBuilder* owner() { return owner_; }
+ BreakAndContinueScope* next() { return next_; }
+
+ // Search the break stack for a break or continue target.
+ HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
+
+ private:
+ BreakAndContinueInfo* info_;
+ HGraphBuilder* owner_;
+ BreakAndContinueScope* next_;
+ };
+
explicit HGraphBuilder(TypeFeedbackOracle* oracle)
: oracle_(oracle),
graph_(NULL),
@@ -629,16 +636,25 @@ class HGraphBuilder: public AstVisitor {
ast_context_(NULL),
call_context_(NULL),
function_return_(NULL),
- inlined_count_(0) { }
+ inlined_count_(0),
+ break_scope_(NULL) {
+ }
HGraph* CreateGraph(CompilationInfo* info);
// Simple accessors.
HGraph* graph() const { return graph_; }
HSubgraph* subgraph() const { return current_subgraph_; }
+ BreakAndContinueScope* break_scope() const { return break_scope_; }
+ void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HEnvironment* environment() const { return subgraph()->environment(); }
- HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
+ HBasicBlock* current_block() const { return subgraph()->exit_block(); }
+ void set_current_block(HBasicBlock* block) {
+ subgraph()->set_exit_block(block);
+ }
+ HEnvironment* environment() const {
+ return current_block()->last_environment();
+ }
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
@@ -650,8 +666,7 @@ class HGraphBuilder: public AstVisitor {
private:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count,
- int ast_id);
+ typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -675,7 +690,7 @@ class HGraphBuilder: public AstVisitor {
// Generators for inline runtime functions.
#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
- void Generate##Name(int argument_count, int ast_id);
+ void Generate##Name(CallRuntime* call);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
@@ -684,9 +699,10 @@ class HGraphBuilder: public AstVisitor {
void Bailout(const char* reason);
void AppendPeeledWhile(IterationStatement* stmt,
- HSubgraph* cond_graph,
- HSubgraph* body_graph,
- HSubgraph* exit_graph);
+ HBasicBlock* condition_entry,
+ HBasicBlock* exit_block,
+ HBasicBlock* body_exit,
+ HBasicBlock* break_block);
void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
void AddToSubgraph(HSubgraph* graph, Statement* stmt);
@@ -702,17 +718,21 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
- // Visit an argument subexpression.
+ // Visit an argument subexpression and emit a push to the outgoing
+ // arguments.
void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
+ // Visit a list of expressions from left to right, each in a value context.
+ void VisitExpressions(ZoneList<Expression*>* exprs);
+
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
- void PreProcessCall(HCall* call);
+ template <int V> HInstruction* PreProcessCall(HCall<V>* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@@ -724,8 +744,6 @@ class HGraphBuilder: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- bool ShouldPeel(HSubgraph* cond, HSubgraph* body);
-
HBasicBlock* CreateBasicBlock(HEnvironment* env);
HSubgraph* CreateEmptySubgraph();
HSubgraph* CreateGotoSubgraph(HEnvironment* env);
@@ -816,6 +834,11 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
Expression* expr);
+ HInstruction* BuildStoreKeyedPixelArrayElement(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr);
+
HCompare* BuildSwitchCompare(HSubgraph* subgraph,
HValue* switch_value,
CaseClause* clause);
@@ -853,6 +876,8 @@ class HGraphBuilder: public AstVisitor {
int inlined_count_;
+ BreakAndContinueScope* break_scope_;
+
friend class AstContext; // Pushes and pops the AST context stack.
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 568b4d84f..b60157c75 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -183,13 +183,6 @@ const XMMRegister xmm7 = { 7 };
typedef XMMRegister DoubleRegister;
-// Index of register used in pusha/popa.
-// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
-inline int EspIndexForPushAll(Register reg) {
- return Register::kNumRegisters - 1 - reg.code();
-}
-
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -980,6 +973,10 @@ class Assembler : public Malloced {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+ int relocation_writer_size() {
+ return (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ }
+
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 6331a6e2e..cb05c3824 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -2385,14 +2385,14 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- NearLabel call_runtime;
+ ASSERT(op_ == Token::ADD);
+ NearLabel left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
// Test if left operand is a string.
- NearLabel left_not_string;
__ test(left, Immediate(kSmiTagMask));
__ j(zero, &left_not_string);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 02e29191d..ae544dc63 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -8234,8 +8234,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
@@ -8244,7 +8244,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 3);
frame_->Push(&answer);
- return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to delete from the context holding the named
@@ -8255,13 +8254,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(Immediate(variable->name()));
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
- return;
+ } else {
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
}
-
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
-
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 322993ee6..5f4d94449 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -431,14 +431,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<uint32_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(esi.code(), value);
if (FLAG_trace_deopt) {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index a5c94c6bf..3cdca4c62 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -3743,8 +3743,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
@@ -3782,17 +3782,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index a59b1a5ba..d61ebdc0f 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -55,7 +55,7 @@ class SafepointGenerator : public PostCallGenerator {
// Ensure that we have enough space in the reloc info to patch
// this with calls when doing deoptimization.
if (ensure_reloc_space_) {
- codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ codegen_->EnsureRelocSpaceForDeoptimization();
}
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
@@ -78,6 +78,7 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateRelocPadding() &&
GenerateSafepointTable();
}
@@ -122,6 +123,16 @@ void LCodeGen::Comment(const char* format, ...) {
}
+bool LCodeGen::GenerateRelocPadding() {
+ int reloc_size = masm()->relocation_writer_size();
+ while (reloc_size < deoptimization_reloc_size.min_size) {
+ __ RecordComment(RelocInfo::kFillerCommentString, true);
+ reloc_size += RelocInfo::kRelocCommentSize;
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -163,6 +174,45 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both eax and esi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ mov(Operand(esi, context_offset), eax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering esi.
+ __ mov(ecx, esi);
+ __ RecordWrite(ecx, context_offset, eax, ebx);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
// We have not executed any compiled code yet, so esi still holds the
@@ -335,6 +385,22 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
}
+void LCodeGen::EnsureRelocSpaceForDeoptimization() {
+ // Since we patch the reloc info with RUNTIME_ENTRY calls every patch
+ // site will take up 2 bytes + any pc-jumps.
+ // We are conservative and always reserver 6 bytes in case where a
+ // simple pc-jump is not enough.
+ uint32_t pc_delta =
+ masm()->pc_offset() - deoptimization_reloc_size.last_pc_offset;
+ if (is_uintn(pc_delta, 6)) {
+ deoptimization_reloc_size.min_size += 2;
+ } else {
+ deoptimization_reloc_size.min_size += 6;
+ }
+ deoptimization_reloc_size.last_pc_offset = masm()->pc_offset();
+}
+
+
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
@@ -382,10 +448,13 @@ void LCodeGen::CallCode(Handle<Code> code,
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
+
if (!adjusted) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ call(code, mode);
+
+ EnsureRelocSpaceForDeoptimization();
RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
@@ -595,6 +664,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -1836,7 +1911,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label before_push_delta;
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
- __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
+ __ StoreToSafepointRegisterSlot(temp, temp);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
@@ -1844,8 +1919,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the eax slot and restore all registers.
- __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
-
+ __ StoreToSafepointRegisterSlot(eax, eax);
__ PopSafepointRegisters();
}
@@ -2100,13 +2174,13 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
- Register external_elements = ToRegister(instr->external_pointer());
+ Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
- ASSERT(result.is(external_elements));
+ ASSERT(result.is(external_pointer));
// Load the result.
- __ movzx_b(result, Operand(external_elements, key, times_1, 0));
+ __ movzx_b(result, Operand(external_pointer, key, times_1, 0));
}
@@ -2301,11 +2375,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (*function == *graph()->info()->closure()) {
__ CallSelf();
} else {
- // This is an indirect call and will not be recorded in the reloc info.
- // Add a comment to the reloc info in case we need to patch this during
- // deoptimization.
- __ RecordComment(RelocInfo::kFillerCommentString, true);
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ EnsureRelocSpaceForDeoptimization();
}
// Setup deoptimization.
@@ -2360,7 +2431,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
if (!tmp.is(eax)) __ mov(tmp, eax);
// Restore input_reg after call to runtime.
- __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2368,7 +2439,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
__ PopSafepointRegisters();
@@ -2493,11 +2564,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- ExternalReference negative_infinity =
- ExternalReference::address_of_negative_infinity();
- __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
- __ ucomisd(xmm_scratch, input_reg);
- DeoptimizeIf(equal, instr->environment());
__ xorpd(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
@@ -2731,6 +2797,25 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
+void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register value = ToRegister(instr->value());
+ ASSERT(ToRegister(instr->TempAt(0)).is(eax));
+
+ __ mov(eax, value);
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ test(eax, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, eax); // 1 if negative, 0 if positive.
+ __ dec_b(eax); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+ __ mov_b(Operand(external_pointer, key, times_1, 0), eax);
+}
+
+
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->object());
@@ -2840,19 +2925,20 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
__ test(result, Immediate(kStringRepresentationMask));
__ j(not_zero, deferred->entry());
- // Check for 1-byte or 2-byte string.
+ // Check for ASCII or two-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
- // 2-byte string.
- // Load the 2-byte character code into the result register.
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
if (instr->index()->IsConstantOperand()) {
__ movzx_w(result,
FieldOperand(string,
- SeqTwoByteString::kHeaderSize + 2 * const_index));
+ SeqTwoByteString::kHeaderSize +
+ (kUC16Size * const_index)));
} else {
__ movzx_w(result, FieldOperand(string,
index,
@@ -2908,7 +2994,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
- __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
+ __ StoreToSafepointRegisterSlot(result, eax);
__ PopSafepointRegisters();
}
@@ -2976,7 +3062,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
- __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
+ __ StoreToSafepointRegisterSlot(reg, Immediate(0));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
@@ -2988,7 +3074,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// number.
__ bind(&done);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
- __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
+ __ StoreToSafepointRegisterSlot(reg, reg);
__ PopSafepointRegisters();
}
@@ -3030,7 +3116,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
- __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
+ __ StoreToSafepointRegisterSlot(reg, eax);
__ PopSafepointRegisters();
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 977fbcda7..5ba4bc435 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
+ deoptimization_reloc_size(),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -102,6 +103,8 @@ class LCodeGen BASE_EMBEDDED {
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void EnsureRelocSpaceForDeoptimization();
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -151,6 +154,9 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
+ // Pad the reloc info to ensure that we have enough space to patch during
+ // deoptimization.
+ bool GenerateRelocPadding();
bool GenerateSafepointTable();
void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
@@ -204,6 +210,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
@@ -251,6 +258,13 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
+ struct DeoptimizationRelocSize {
+ int min_size;
+ int last_pc_offset;
+ };
+
+ DeoptimizationRelocSize deoptimization_reloc_size;
+
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index a57e8c928..221a7aa15 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -404,7 +404,7 @@ void LChunk::MarkEmptyBlocks() {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -413,7 +413,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1223,7 +1241,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
- return AssignEnvironment(DefineSameAsFirst(result));
+ return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1840,6 +1858,23 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* val = UseRegister(instr->value());
+ LOperand* key = UseRegister(instr->key());
+ // The generated code requires that the clamped value is in a byte
+ // register. eax is an arbitrary choice to satisfy this requirement.
+ LOperand* clamped = FixedTemp(eax);
+
+ return new LStorePixelArrayElement(external_pointer, key, val, clamped);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
@@ -1923,8 +1958,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result = new LDeleteProperty(Use(instr->object()),
- UseOrConstant(instr->key()));
+ LDeleteProperty* result =
+ new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1957,8 +1992,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object (we bail out in all other
- // cases).
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index f1b9ffc99..ad0b0ca05 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -150,6 +148,7 @@ class LCodeGen;
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StorePixelArrayElement) \
V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
@@ -1580,34 +1579,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
+ LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
inputs_[0] = obj;
inputs_[1] = val;
+ temps_[0] = temp;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreNamedField: public LStoreNamed {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
- : LStoreNamed(obj, val) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+ Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1626,6 +1614,8 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+ virtual void PrintDataTo(StringStream* stream);
+
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -1633,15 +1623,17 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1651,14 +1643,25 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 1> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
+ LStorePixelArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val,
+ LOperand* clamped) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ temps_[0] = clamped;
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+ DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
+ "store-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
@@ -1676,6 +1679,8 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+ virtual void PrintDataTo(StringStream* stream);
+
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 7f93b843d..91b6651fe 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1654,6 +1654,28 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
+// Store the value in register src in the safepoint register stack
+// slot for register dst.
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
+ mov(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ mov(dst, SafepointRegisterSlot(src));
+}
+
+
+Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the lowest encoding,
// which means that lowest encodings are furthest away from
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 16361ad23..62bb0f363 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -143,7 +143,11 @@ class MacroAssembler: public Assembler {
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
- static int SafepointRegisterStackIndex(int reg_code);
+ // Store the value in register/immediate src in the safepoint
+ // register stack slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, Register src);
+ void StoreToSafepointRegisterSlot(Register dst, Immediate src);
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -667,6 +671,15 @@ class MacroAssembler: public Assembler {
MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
Register scratch,
bool gc_allowed);
+
+
+ // Compute memory operands for safepoint stack slots.
+ Operand SafepointRegisterSlot(Register reg);
+ static int SafepointRegisterStackIndex(int reg_code);
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
};
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index fdb22acea..51cc46a08 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -2204,8 +2204,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a string or a symbol.
@@ -2220,8 +2221,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2241,8 +2243,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2586,8 +2589,8 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Compute the cell operand to use.
Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
if (Serializer::enabled()) {
- __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
- cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
+ __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+ cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
}
// Check that the value in the cell is not the hole. If it is, this
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 968b45d0b..7482830ae 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -435,16 +435,25 @@ Object* CallICBase::TryCallAsFunction(Object* object) {
}
-void CallICBase::ReceiverToObject(Handle<Object> object) {
- HandleScope scope;
- Handle<Object> receiver(object);
+void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
+ Handle<Object> object) {
+ if (callee->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
+ if (function->shared()->strict_mode() || function->IsBuiltin()) {
+ // Do not wrap receiver for strict mode functions or for builtins.
+ return;
+ }
+ }
- // Change the receiver to the result of calling ToObject on it.
- const int argc = this->target()->arguments_count();
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- int index = frame->ComputeExpressionsCount() - (argc + 1);
- frame->SetExpression(index, *Factory::ToObject(object));
+ // And only wrap string, number or boolean.
+ if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+ // Change the receiver to the result of calling ToObject on it.
+ const int argc = this->target()->arguments_count();
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+ int index = frame->ComputeExpressionsCount() - (argc + 1);
+ frame->SetExpression(index, *Factory::ToObject(object));
+ }
}
@@ -458,10 +467,6 @@ MaybeObject* CallICBase::LoadFunction(State state,
return TypeError("non_object_property_call", object, name);
}
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- ReceiverToObject(object);
- }
-
// Check if the name is trivially convertible to an index and get
// the element if so.
uint32_t index;
@@ -505,6 +510,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
object->GetProperty(*object, &lookup, *name, &attr);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
+
if (lookup.type() == INTERCEPTOR) {
// If the object does not have the requested property, check which
// exception we need to throw.
@@ -516,31 +522,37 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
}
- ASSERT(result != Heap::the_hole_value());
+ ASSERT(!result->IsTheHole());
- if (result->IsJSFunction()) {
+ HandleScope scope;
+ // Wrap result in a handle because ReceiverToObjectIfRequired may allocate
+ // new object and cause GC.
+ Handle<Object> result_handle(result);
+ // Make receiver an object if the callee requires it. Strict mode or builtin
+ // functions do not wrap the receiver, non-strict functions and objects
+ // called as functions do.
+ ReceiverToObjectIfRequired(result_handle, object);
+
+ if (result_handle->IsJSFunction()) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
if (Debug::StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
// cause GC.
- HandleScope scope;
- Handle<JSFunction> function(JSFunction::cast(result));
+ Handle<JSFunction> function(JSFunction::cast(*result_handle));
Debug::HandleStepIn(function, object, fp(), false);
return *function;
}
#endif
- return result;
+ return *result_handle;
}
// Try to find a suitable function delegate for the object at hand.
- result = TryCallAsFunction(result);
- MaybeObject* answer = result;
- if (!result->IsJSFunction()) {
- answer = TypeError("property_not_function", object, name);
- }
- return answer;
+ result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
+ if (result_handle->IsJSFunction()) return *result_handle;
+
+ return TypeError("property_not_function", object, name);
}
@@ -565,8 +577,8 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
case kStringCharAt:
if (object->IsString()) {
String* string = String::cast(*object);
- // Check that there's the right wrapper in the receiver slot.
- ASSERT(string == JSValue::cast(args[0])->value());
+ // Check there's the right string value or wrapper in the receiver slot.
+ ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
// If we're in the default (fastest) state and the index is
// out of bounds, update the state to record this fact.
if (*extra_ic_state == DEFAULT_STRING_STUB &&
@@ -775,10 +787,6 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, key);
}
- if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- ReceiverToObject(object);
- }
-
if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
@@ -793,17 +801,20 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
#endif
}
}
- Object* result;
- { MaybeObject* maybe_result = Runtime::GetObjectProperty(object, key);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (result->IsJSFunction()) return result;
- result = TryCallAsFunction(result);
- MaybeObject* answer = result;
- if (!result->IsJSFunction()) {
- answer = TypeError("property_not_function", object, key);
- }
- return answer;
+
+ HandleScope scope;
+ Handle<Object> result = GetProperty(object, key);
+
+ // Make receiver an object if the callee requires it. Strict mode or builtin
+ // functions do not wrap the receiver, non-strict functions and objects
+ // called as functions do.
+ ReceiverToObjectIfRequired(result, object);
+
+ if (result->IsJSFunction()) return *result;
+ result = Handle<Object>(TryCallAsFunction(*result));
+ if (result->IsJSFunction()) return *result;
+
+ return TypeError("property_not_function", object, key);
}
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 3b10d064f..96838c733 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -224,7 +224,7 @@ class CallICBase: public IC {
// Otherwise, it returns the undefined value.
Object* TryCallAsFunction(Object* object);
- void ReceiverToObject(Handle<Object> object);
+ void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
static void Clear(Address address, Code* target);
friend class IC;
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 9f5f1b97d..a13a18977 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -478,11 +478,6 @@ void LiveRange::ConvertOperands() {
}
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) {
- return AddUsePosition(pos, CreateAssignedOperand());
-}
-
-
bool LiveRange::CanCover(LifetimePosition position) const {
if (IsEmpty()) return false;
return Start().Value() <= position.Value() &&
@@ -1098,6 +1093,21 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
} else {
ASSERT(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred);
+
+ // We are going to insert a move before the branch instruction.
+ // Some branch instructions (e.g. loops' back edges)
+ // can potentially cause a GC so they have a pointer map.
+ // By inserting a move we essentially create a copy of a
+ // value which is invisible to PopulatePointerMaps(), because we store
+ // it into a location different from the operand of a live range
+ // covering a branch instruction.
+ // Thus we need to manually record a pointer.
+ if (HasTaggedValue(range->id())) {
+ LInstruction* branch = InstructionAt(pred->last_instruction_index());
+ if (branch->HasPointerMap()) {
+ branch->pointer_map()->RecordPointer(cur_op);
+ }
+ }
}
gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
}
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 914a5b686..d53ea7871 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -286,7 +286,6 @@ class LiveRange: public ZoneObject {
LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
LiveRange* next() const { return next_; }
bool IsChild() const { return parent() != NULL; }
- bool IsParent() const { return parent() == NULL; }
int id() const { return id_; }
bool IsFixed() const { return id_ < 0; }
bool IsEmpty() const { return first_interval() == NULL; }
@@ -360,7 +359,6 @@ class LiveRange: public ZoneObject {
void EnsureInterval(LifetimePosition start, LifetimePosition end);
void AddUseInterval(LifetimePosition start, LifetimePosition end);
UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
- UsePosition* AddUsePosition(LifetimePosition pos);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index a2f9df0fd..d85a87c12 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -536,10 +536,12 @@ class ShallowIterator BASE_EMBEDDED {
inline LEnvironment* env() { return env_; }
private:
+ inline bool ShouldSkip(LOperand* op) {
+ return op == NULL || op->IsConstantOperand() || op->IsArgument();
+ }
+
inline int AdvanceToNext(int start) {
- while (start < limit_ &&
- (env_->values()->at(start) == NULL ||
- env_->values()->at(start)->IsConstantOperand())) {
+ while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
start++;
}
return start;
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index c7b75679e..9a498ec0f 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -300,6 +300,8 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
Append("\\,");
} else if (c == '\\') {
Append("\\\\");
+ } else if (c == '\"') {
+ Append("\"\"");
} else {
Append("%lc", c);
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 6eb3c9b0b..16aeadb0b 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -147,7 +147,7 @@ bool Profiler::paused_ = false;
// StackTracer implementation
//
void StackTracer::Trace(TickSample* sample) {
- sample->function = NULL;
+ sample->tos = NULL;
sample->frames_count = 0;
// Avoid collecting traces while doing GC.
@@ -159,15 +159,9 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
- const Address function_address =
- sample->fp + JavaScriptFrameConstants::kFunctionOffset;
- if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
- function_address)) {
- Object* object = Memory::Object_at(function_address);
- if (object->IsHeapObject()) {
- sample->function = HeapObject::cast(object)->address();
- }
- }
+ // Sample potential return address value for frameless invocation of
+ // stubs (we'll figure out later if this value makes sense).
+ sample->tos = Memory::Address_at(sample->sp);
int i = 0;
const Address callback = Top::external_callback();
@@ -181,10 +175,7 @@ void StackTracer::Trace(TickSample* sample) {
SafeStackTraceFrameIterator it(sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
- Object* object = it.frame()->function_slot_object();
- if (object->IsHeapObject()) {
- sample->stack[i++] = HeapObject::cast(object)->address();
- }
+ sample->stack[i++] = it.frame()->pc();
it.Advance();
}
sample->frames_count = i;
@@ -710,17 +701,6 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static const char* ComputeMarker(Code* code) {
- switch (code->kind()) {
- case Code::FUNCTION: return code->optimizable() ? "~" : "";
- case Code::OPTIMIZED_FUNCTION: return "*";
- default: return "";
- }
-}
-#endif
-
-
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
@@ -731,7 +711,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
+ msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
if (*p == '"') {
msg.Append('\\');
@@ -746,9 +726,40 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ String* name) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (name != NULL) {
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CodeCreateEvent(tag, code, *str);
+ } else {
+ CodeCreateEvent(tag, code, "");
+ }
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+// ComputeMarker must only be used when SharedFunctionInfo is known.
+static const char* ComputeMarker(Code* code) {
+ switch (code->kind()) {
+ case Code::FUNCTION: return code->optimizable() ? "~" : "";
+ case Code::OPTIMIZED_FUNCTION: return "*";
+ default: return "";
+ }
+}
+#endif
+
+
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
+ if (code == Builtins::builtin(Builtins::LazyCompile)) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -756,7 +767,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
+ msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
+ msg.AppendAddress(shared->address());
+ msg.Append(",%s", ComputeMarker(code));
LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
@@ -764,26 +777,31 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
}
+// Although it is possible to extract the source and line from
+// the SharedFunctionInfo object, we leave it to the caller
+// to keep logging functions free from heap allocations.
void Logger::CodeCreateEvent(LogEventsAndTags tag,
- Code* code, String* name,
+ Code* code,
+ SharedFunctionInfo* shared,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ SmartPointer<char> name =
+ shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
- msg.Append(",%d,\"%s%s %s:%d\"",
+ msg.Append(",%d,\"%s %s:%d\",",
code->ExecutableSize(),
- ComputeMarker(code),
- *str,
+ *name,
*sourcestr,
line);
+ msg.AppendAddress(shared->address());
+ msg.Append(",%s", ComputeMarker(code));
LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
@@ -863,42 +881,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
}
-void Logger::FunctionCreateEvent(JSFunction* function) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This function can be called from GC iterators (during Scavenge,
- // MC, and MS), so marking bits can be set on objects. That's
- // why unchecked accessors are used here.
- if (!Log::IsEnabled() || !FLAG_log_code) return;
- LogMessageBuilder msg;
- msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]);
- msg.AppendAddress(function->address());
- msg.Append(',');
- msg.AppendAddress(function->unchecked_code()->address());
- msg.Append('\n');
- msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::FunctionCreateEventFromMove(JSFunction* function) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
- FunctionCreateEvent(function);
- }
-#endif
-}
-
-
-void Logger::FunctionMoveEvent(Address from, Address to) {
+void Logger::SFIMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
- MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
-#endif
-}
-
-
-void Logger::FunctionDeleteEvent(Address from) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
+ MoveEventInternal(SFI_MOVE_EVENT, from, to);
#endif
}
@@ -1118,7 +1103,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(',');
msg.AppendAddress(sample->sp);
msg.Append(',');
- msg.AppendAddress(sample->function);
+ msg.AppendAddress(sample->tos);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1187,7 +1172,6 @@ void Logger::ResumeProfiler(int flags, int tag) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
- LogFunctionObjects();
LogAccessorCallbacks();
if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
ticker_->Start();
@@ -1388,10 +1372,9 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
+ if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue;
Handle<SharedFunctionInfo> shared = sfis[i];
- Handle<String> name(String::cast(shared->name()));
- Handle<String> func_name(name->length() > 0 ?
- *name : shared->inferred_name());
+ Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
if (script->name()->IsString()) {
@@ -1400,18 +1383,18 @@ void Logger::LogCompiledFunctions() {
if (line_num > 0) {
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *func_name,
+ *code_objects[i], *shared,
*script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code_objects[i], *script_name));
+ *code_objects[i], *shared, *script_name));
}
} else {
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code_objects[i], *func_name));
+ *code_objects[i], *shared, *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
@@ -1425,24 +1408,12 @@ void Logger::LogCompiledFunctions() {
}
} else {
PROFILE(CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name));
+ Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name));
}
}
}
-void Logger::LogFunctionObjects() {
- AssertNoAllocation no_alloc;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsJSFunction()) continue;
- JSFunction* jsf = JSFunction::cast(obj);
- if (!jsf->is_compiled()) continue;
- PROFILE(FunctionCreateEvent(jsf));
- }
-}
-
-
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 771709c8a..a808cd1d4 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -91,9 +91,7 @@ class LogMessageBuilder;
V(CODE_MOVE_EVENT, "code-move") \
V(CODE_DELETE_EVENT, "code-delete") \
V(CODE_MOVING_GC, "code-moving-gc") \
- V(FUNCTION_CREATION_EVENT, "function-creation") \
- V(FUNCTION_MOVE_EVENT, "function-move") \
- V(FUNCTION_DELETE_EVENT, "function-delete") \
+ V(SFI_MOVE_EVENT, "sfi-move") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
V(TICK_EVENT, "tick") \
V(REPEAT_META_EVENT, "repeat") \
@@ -205,8 +203,15 @@ class Logger {
// Emits a code create event.
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code, const char* source);
- static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
- static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
+ static void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code, String* name);
+ static void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
+ String* name);
+ static void CodeCreateEvent(LogEventsAndTags tag,
+ Code* code,
+ SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
static void CodeMovingGCEvent();
@@ -216,13 +221,8 @@ class Logger {
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
- // Emits a function object create event.
- static void FunctionCreateEvent(JSFunction* function);
- static void FunctionCreateEventFromMove(JSFunction* function);
- // Emits a function move event.
- static void FunctionMoveEvent(Address from, Address to);
- // Emits a function delete event.
- static void FunctionDeleteEvent(Address from);
+
+ static void SFIMoveEvent(Address from, Address to);
static void SnapshotPositionEvent(Address addr, int pos);
@@ -273,8 +273,6 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
- // Logs all compiled JSFunction objects found in the heap.
- static void LogFunctionObjects();
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 5c649d17d..a3b769a8b 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -2819,9 +2819,8 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsJSFunction()) {
- PROFILE(FunctionMoveEvent(old_addr, new_addr));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
+ if (copied_to->IsSharedFunctionInfo()) {
+ PROFILE(SFIMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@@ -2912,9 +2911,8 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsJSFunction()) {
- PROFILE(FunctionMoveEvent(old_addr, new_addr));
- PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
+ if (copied_to->IsSharedFunctionInfo()) {
+ PROFILE(SFIMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@@ -2931,8 +2929,6 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(CodeDeleteEvent(obj->address()));
- } else if (obj->IsJSFunction()) {
- PROFILE(FunctionDeleteEvent(obj->address()));
}
#endif
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index e0232d587..2d100529e 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2813,6 +2813,12 @@ bool JSObject::ReferencesObject(Object* obj) {
MaybeObject* JSObject::PreventExtensions() {
+ if (IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
+ return Heap::false_value();
+ }
+
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
@@ -6660,7 +6666,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
break;
}
case PIXEL_ELEMENTS: {
- // TODO(iposva): Add testcase.
PixelArray* pixels = PixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
return true;
@@ -6674,7 +6679,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
- // TODO(kbr): Add testcase.
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
@@ -7265,11 +7269,7 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
}
break;
}
- case PIXEL_ELEMENTS: {
- // TODO(iposva): Add testcase and implement.
- UNIMPLEMENTED();
- break;
- }
+ case PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@@ -7277,8 +7277,8 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
- // TODO(kbr): Add testcase and implement.
- UNIMPLEMENTED();
+ MaybeObject* value = GetExternalElement(index);
+ if (!value->ToObjectUnchecked()->IsUndefined()) return value;
break;
}
case DICTIONARY_ELEMENTS: {
@@ -7366,6 +7366,46 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
+ case PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS: {
+ MaybeObject* value = GetExternalElement(index);
+ if (!value->ToObjectUnchecked()->IsUndefined()) return value;
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary = element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ return GetElementWithCallback(receiver,
+ element,
+ index,
+ this);
+ }
+ return element;
+ }
+ break;
+ }
+ }
+
+ Object* pt = GetPrototype();
+ if (pt == Heap::null_value()) return Heap::undefined_value();
+ return pt->GetElementWithReceiver(receiver, index);
+}
+
+
+MaybeObject* JSObject::GetExternalElement(uint32_t index) {
+ // Get element works for both JSObject and JSArray since
+ // JSArray::length cannot change.
+ switch (GetElementsKind()) {
case PIXEL_ELEMENTS: {
PixelArray* pixels = PixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
@@ -7433,27 +7473,12 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return GetElementWithCallback(receiver,
- element,
- index,
- this);
- }
- return element;
- }
+ case FAST_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ UNREACHABLE();
break;
- }
}
-
- Object* pt = GetPrototype();
- if (pt == Heap::null_value()) return Heap::undefined_value();
- return pt->GetElementWithReceiver(receiver, index);
+ return Heap::undefined_value();
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index d6349e66e..fbfc5fdc2 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1549,6 +1549,11 @@ class JSObject: public HeapObject {
MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
+ // Get external element value at index if there is one and undefined
+ // otherwise. Can return a failure if allocation of a heap number
+ // failed.
+ MaybeObject* GetExternalElement(uint32_t index);
+
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index f5fc5be07..a7cc5256f 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -71,7 +71,7 @@ void OS::Setup() {
uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // Nothing special about cygwin
+ return 0; // Nothing special about Cygwin.
}
@@ -209,7 +209,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
@@ -304,12 +304,12 @@ void OS::LogSharedLibraryAddresses() {
void OS::SignalCodeMovingGC() {
- // Nothing to do on Cygwin
+ // Nothing to do on Cygwin.
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin
+ // Not supported on Cygwin.
return 0;
}
@@ -443,17 +443,36 @@ void Thread::Join() {
}
+static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
+ pthread_key_t pthread_key) {
+ // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+ // because pthread_key_t is a pointer type on Cygwin. This will probably not
+ // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+ return static_cast<Thread::LocalStorageKey>(ptr_key);
+}
+
+
+static inline pthread_key_t LocalKeyToPthreadKey(
+ Thread::LocalStorageKey local_key) {
+ STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+ intptr_t ptr_key = static_cast<intptr_t>(local_key);
+ return reinterpret_cast<pthread_key_t>(ptr_key);
+}
+
+
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
- return static_cast<LocalStorageKey>(key);
+ return PthreadKeyToLocalKey(key);
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
int result = pthread_key_delete(pthread_key);
USE(result);
ASSERT(result == 0);
@@ -461,13 +480,13 @@ void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
void* Thread::GetThreadLocal(LocalStorageKey key) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
return pthread_getspecific(pthread_key);
}
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
- pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
pthread_setspecific(pthread_key, value);
}
@@ -594,7 +613,7 @@ Semaphore* OS::CreateSemaphore(int count) {
// ----------------------------------------------------------------------------
// Cygwin profiler support.
//
-// On cygwin we use the same sampler implementation as on win32
+// On Cygwin we use the same sampler implementation as on win32.
class Sampler::PlatformData : public Malloced {
public:
@@ -698,8 +717,7 @@ void Sampler::Start() {
// Start sampler thread.
DWORD tid;
SetActive(true);
- data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0,
- &tid);
+ data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
// Set thread to high priority to increase sampling accuracy.
SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
}
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index c18049fec..21763b5de 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -224,7 +224,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 761ff7e20..733956ace 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -327,7 +327,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index ea35c1b13..35724c352 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -205,7 +205,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index 0002dd762..e2796294a 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -222,7 +222,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 556e26be2..ebe0475f4 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -235,7 +235,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
- FILE* file = fopen(name, "w+");
+ FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index b5a85f668..f24994b5b 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -939,7 +939,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
- if (file == NULL) return NULL;
+ if (file == INVALID_HANDLE_VALUE) return NULL;
int size = static_cast<int>(GetFileSize(file, NULL));
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 8cb1561c5..88825e645 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -383,14 +383,10 @@ class Thread: public ThreadHandle {
// LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
// to ensure that enumeration type has correct value range (see Issue 830 for
// more details).
-#ifdef __CYGWIN__
- typedef void* LocalStorageKey;
-#else
enum LocalStorageKey {
LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
-#endif
// Create new thread.
Thread();
@@ -571,13 +567,13 @@ class TickSample {
pc(NULL),
sp(NULL),
fp(NULL),
- function(NULL),
+ tos(NULL),
frames_count(0) {}
StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- Address function; // The last called JS function.
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ Address tos; // Top stack value (*sp).
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 3df6af06f..4bcfa9b1e 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -45,16 +45,6 @@ const char* StringsStorage::GetFunctionName(const char* name) {
}
-CodeEntry::CodeEntry(int security_token_id)
- : tag_(Logger::FUNCTION_TAG),
- name_prefix_(kEmptyNamePrefix),
- name_(""),
- resource_name_(""),
- line_number_(0),
- security_token_id_(security_token_id) {
-}
-
-
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
@@ -66,6 +56,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
name_(name),
resource_name_(resource_name),
line_number_(line_number),
+ shared_id_(0),
security_token_id_(security_token_id) {
}
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 06ee333b9..261b3d6ff 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -156,13 +156,18 @@ void CodeEntry::CopyData(const CodeEntry& source) {
uint32_t CodeEntry::GetCallUid() const {
uint32_t hash = ComputeIntegerHash(tag_);
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
- hash ^= ComputeIntegerHash(line_number_);
+ if (shared_id_ != 0) {
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(shared_id_));
+ } else {
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+ hash ^= ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+ hash ^= ComputeIntegerHash(line_number_);
+ }
return hash;
}
@@ -170,10 +175,12 @@ uint32_t CodeEntry::GetCallUid() const {
bool CodeEntry::IsSameAs(CodeEntry* entry) const {
return this == entry
|| (tag_ == entry->tag_
- && name_prefix_ == entry->name_prefix_
- && name_ == entry->name_
- && resource_name_ == entry->resource_name_
- && line_number_ == entry->line_number_);
+ && shared_id_ == entry->shared_id_
+ && (shared_id_ != 0
+ || (name_prefix_ == entry->name_prefix_
+ && name_ == entry->name_
+ && resource_name_ == entry->resource_name_
+ && line_number_ == entry->line_number_)));
}
@@ -458,23 +465,12 @@ void CpuProfile::Print() {
}
+CodeEntry* const CodeMap::kSfiCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
CodeMap::CodeEntryInfo(NULL, 0);
-void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
- CodeTree::Locator locator;
- if (tree_.Find(code_start, &locator)) {
- const CodeEntryInfo& code_info = locator.value();
- if (tree_.Insert(start, &locator)) {
- entry->CopyData(*code_info.entry);
- locator.set_value(CodeEntryInfo(entry, code_info.size));
- }
- }
-}
-
-
CodeEntry* CodeMap::FindEntry(Address addr) {
CodeTree::Locator locator;
if (tree_.FindGreatestLessThan(addr, &locator)) {
@@ -487,6 +483,22 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
}
+int CodeMap::GetSFITag(Address addr) {
+ CodeTree::Locator locator;
+ // For SFI entries, 'size' field is used to store their IDs.
+ if (tree_.Find(addr, &locator)) {
+ const CodeEntryInfo& entry = locator.value();
+ ASSERT(entry.entry == kSfiCodeEntry);
+ return entry.size;
+ } else {
+ tree_.Insert(addr, &locator);
+ int tag = next_sfi_tag_++;
+ locator.set_value(CodeEntryInfo(kSfiCodeEntry, tag));
+ return tag;
+ }
+}
+
+
void CodeMap::CodeTreePrinter::Call(
const Address& key, const CodeMap::CodeEntryInfo& value) {
OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
@@ -715,13 +727,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
}
-CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) {
- CodeEntry* entry = new CodeEntry(security_token_id);
- code_entries_.Add(entry);
- return entry;
-}
-
-
void CpuProfilesCollection::AddPathToCurrentProfiles(
const Vector<CodeEntry*>& path) {
// As starting / stopping profiles is rare relatively to this
@@ -784,19 +789,10 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (sample.pc != NULL) {
*entry++ = code_map_.FindEntry(sample.pc);
- if (sample.function != NULL) {
- *entry = code_map_.FindEntry(sample.function);
+ if (sample.tos != NULL) {
+ *entry = code_map_.FindEntry(sample.tos);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
- } else {
- CodeEntry* pc_entry = *entries.start();
- if (pc_entry == NULL) {
- *entry = NULL;
- } else if (pc_entry->is_js_function()) {
- // Use function entry in favor of pc entry, as function
- // entry has security token.
- *entries.start() = NULL;
- }
}
entry++;
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index cacd27eaa..748714dc4 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -88,7 +88,6 @@ class StringsStorage {
class CodeEntry {
public:
- explicit INLINE(CodeEntry(int security_token_id));
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
@@ -103,6 +102,8 @@ class CodeEntry {
INLINE(const char* name() const) { return name_; }
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
+ INLINE(int shared_id() const) { return shared_id_; }
+ INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@@ -119,6 +120,7 @@ class CodeEntry {
const char* name_;
const char* resource_name_;
int line_number_;
+ int shared_id_;
int security_token_id_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@@ -234,12 +236,12 @@ class CpuProfile {
class CodeMap {
public:
- CodeMap() { }
+ CodeMap() : next_sfi_tag_(1) { }
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
INLINE(void MoveCode(Address from, Address to));
INLINE(void DeleteCode(Address addr));
- void AddAlias(Address start, CodeEntry* entry, Address code_start);
CodeEntry* FindEntry(Address addr);
+ int GetSFITag(Address addr);
void Print();
@@ -267,7 +269,11 @@ class CodeMap {
void Call(const Address& key, const CodeEntryInfo& value);
};
+ // Fake CodeEntry pointer to distinguish SFI entries.
+ static CodeEntry* const kSfiCodeEntry;
+
CodeTree tree_;
+ int next_sfi_tag_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 48ff69f5d..5a443efc3 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -1051,6 +1051,12 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
// Fall-through and introduce the absent property by using
// SetProperty.
} else {
+ // For const properties, we treat a callback with this name
+ // even in the prototype as a conflicting declaration.
+ if (is_const_property && (lookup.type() == CALLBACKS)) {
+ return ThrowRedeclarationError("const", name);
+ }
+ // Otherwise, we check for locally conflicting declarations.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
return ThrowRedeclarationError(type, name);
@@ -1076,29 +1082,34 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
? static_cast<PropertyAttributes>(base | READ_ONLY)
: base;
- if (lookup.IsProperty()) {
- // There's a local property that we need to overwrite because
- // we're either declaring a function or there's an interceptor
- // that claims the property is absent.
-
- // Check for conflicting re-declarations. We cannot have
- // conflicting types in case of intercepted properties because
- // they are absent.
- if (lookup.type() != INTERCEPTOR &&
- (lookup.IsReadOnly() || is_const_property)) {
- const char* type = (lookup.IsReadOnly()) ? "const" : "var";
- return ThrowRedeclarationError(type, name);
- }
- RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
+ // There's a local property that we need to overwrite because
+ // we're either declaring a function or there's an interceptor
+ // that claims the property is absent.
+ //
+ // Check for conflicting re-declarations. We cannot have
+ // conflicting types in case of intercepted properties because
+ // they are absent.
+ if (lookup.IsProperty() &&
+ (lookup.type() != INTERCEPTOR) &&
+ (lookup.IsReadOnly() || is_const_property)) {
+ const char* type = (lookup.IsReadOnly()) ? "const" : "var";
+ return ThrowRedeclarationError(type, name);
+ }
+
+ // Safari does not allow the invocation of callback setters for
+ // function declarations. To mimic this behavior, we do not allow
+ // the invocation of setters for function values. This makes a
+ // difference for global functions with the same names as event
+ // handlers such as "function onload() {}". Firefox does call the
+ // onload setter in those case and Safari does not. We follow
+ // Safari for compatibility.
+ if (value->IsJSFunction()) {
+ RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global,
+ name,
+ value,
+ attributes));
} else {
- // If a property with this name does not already exist on the
- // global object add the property locally. We take special
- // precautions to always add it as a local property even in case
- // of callbacks in the prototype chain (this rules out using
- // SetProperty). Also, we must use the handle-based version to
- // avoid GC issues.
- RETURN_IF_EMPTY_HANDLE(
- SetLocalPropertyIgnoreAttributes(global, name, value, attributes));
+ RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
}
}
@@ -1186,6 +1197,20 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
ASSERT(!context_ext->HasLocalProperty(*name));
Handle<Object> value(Heap::undefined_value());
if (*initial_value != NULL) value = initial_value;
+ // Declaring a const context slot is a conflicting declaration if
+ // there is a callback with that name in a prototype. It is
+ // allowed to introduce const variables in
+ // JSContextExtensionObjects. They are treated specially in
+ // SetProperty and no setters are invoked for those since they are
+ // not real JSObjects.
+ if (initial_value->IsTheHole() &&
+ !context_ext->IsJSContextExtensionObject()) {
+ LookupResult lookup;
+ context_ext->Lookup(*name, &lookup);
+ if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
+ return ThrowRedeclarationError("const", name);
+ }
+ }
RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode));
}
@@ -1212,11 +1237,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
// there, there is a property with this name in the prototype chain.
// We follow Safari and Firefox behavior and only set the property
// locally if there is an explicit initialization value that we have
- // to assign to the property. When adding the property we take
- // special precautions to always add it as a local property even in
- // case of callbacks in the prototype chain (this rules out using
- // SetProperty). We have SetLocalPropertyIgnoreAttributes for
- // this.
+ // to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
JSObject* real_holder = global;
@@ -1277,11 +1298,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
}
global = Top::context()->global();
- if (assign) {
- return global->SetLocalPropertyIgnoreAttributes(*name,
- args[1],
- attributes);
- }
+ if (assign) return global->SetProperty(*name, args[1], attributes);
return Heap::undefined_value();
}
@@ -3673,6 +3690,8 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
is_element) {
// Normalize the elements to enable attributes on the property.
if (js_object->IsJSGlobalProxy()) {
+ // We do not need to do access checks here since these has already
+ // been performed by the call to GetOwnProperty.
Handle<Object> proto(js_object->GetPrototype());
// If proxy is detached, ignore the assignment. Alternatively,
// we could throw an exception.
@@ -6927,6 +6946,7 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = Factory::NewJSObject(function);
+ RETURN_IF_EMPTY_HANDLE(result);
// Delay setting the stub if inobject slack tracking is in progress.
if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
TrySettingInlineConstructStub(function);
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 83d7de3af..78db26a50 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -735,9 +735,8 @@ Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
bool can_be_caught_externally = false;
ShouldReportException(&can_be_caught_externally,
is_catchable_by_javascript(exception));
- if (can_be_caught_externally) {
- thread_local_.catcher_ = try_catch_handler();
- }
+ thread_local_.catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
// Set the exception being re-thrown.
set_pending_exception(exception);
@@ -913,9 +912,10 @@ void Top::DoThrow(MaybeObject* exception,
}
}
- if (can_be_caught_externally) {
- thread_local_.catcher_ = try_catch_handler();
- }
+ // Do not forget to clean catcher_ if currently thrown exception cannot
+ // be caught. If necessary, ReThrow will update the catcher.
+ thread_local_.catcher_ = can_be_caught_externally ?
+ try_catch_handler() : NULL;
// NOTE: Notifying the debugger or generating the message
// may have caused new exceptions. For now, we just ignore
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 0ff764934..945043da9 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -54,7 +54,12 @@ bool V8::Initialize(Deserializer* des) {
if (has_been_disposed_ || has_fatal_error_) return false;
if (IsRunning()) return true;
+#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
+ use_crankshaft_ = false;
+#else
use_crankshaft_ = FLAG_crankshaft;
+#endif
+
// Peephole optimization might interfere with deoptimization.
FLAG_peephole_optimization = !use_crankshaft_;
is_running_ = true;
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 884b6f414..91e19c13d 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -92,7 +92,7 @@ function GlobalIsFinite(number) {
// ECMA-262 - 15.1.2.2
function GlobalParseInt(string, radix) {
- if (IS_UNDEFINED(radix)) {
+ if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
// Some people use parseInt instead of Math.floor. This
// optimization makes parseInt on a Smi 12 times faster (60ns
// vs 800ns). The following optimization makes parseInt on a
@@ -105,7 +105,7 @@ function GlobalParseInt(string, radix) {
// Truncate number.
return string | 0;
}
- radix = 0;
+ if (IS_UNDEFINED(radix)) radix = 0;
} else {
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36)))
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 8c233375e..59824863d 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
-#define BUILD_NUMBER 5
+#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 697f6cd40..ea41a202d 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -190,13 +190,13 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+ // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
+ 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
};
-const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
+ 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
};
@@ -3114,8 +3114,8 @@ void Assembler::RecordDebugBreakSlot() {
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
+void Assembler::RecordComment(const char* msg, bool force) {
+ if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 91e7e6cc6..553fbe422 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -99,12 +99,12 @@ struct Register {
static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
- return allocationIndexByRegisterCode[reg.code()];
+ return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- Register result = { registerCodeByAllocationIndex[index] };
+ Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
@@ -155,8 +155,8 @@ struct Register {
int code_;
private:
- static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
- static const int allocationIndexByRegisterCode[kNumRegisters];
+ static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
const Register rax = { 0 };
@@ -1312,7 +1312,7 @@ class Assembler : public Malloced {
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
- void RecordComment(const char* msg);
+ void RecordComment(const char* msg, bool force = false);
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 0cfe665ce..240087e17 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1336,54 +1336,33 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- // Registers containing left and right operands respectively.
- Register lhs = rdx;
- Register rhs = rax;
-
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
- __ JumpIfNotString(lhs, r8, &not_string1);
-
- // First argument is a a string, test second.
- __ JumpIfSmi(rhs, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
-
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
+ ASSERT(op_ == Token::ADD);
+ NearLabel left_not_string, call_runtime;
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &left_not_string);
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ JumpIfNotString(rhs, rhs, &not_strings);
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
- __ bind(&not_strings);
// Neither argument is a string.
- // Pop arguments, because CallRuntimeCode wants to push them again.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
+ __ bind(&call_runtime);
}
@@ -1440,9 +1419,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
-
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
GenerateTypeTransition(masm);
}
@@ -3461,6 +3442,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// is and instance of the function and anything else to
// indicate that the value is not an instance.
+ // None of the flags are supported on X64.
+ ASSERT(flags_ == kNoFlags);
+
// Get the object - go slow case if it's a smi.
Label slow;
__ movq(rax, Operand(rsp, 2 * kPointerSize));
@@ -3536,10 +3520,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
-Register InstanceofStub::left() { return rax; }
+// Passing arguments in registers is not supported.
+Register InstanceofStub::left() { return no_reg; }
-Register InstanceofStub::right() { return rdx; }
+Register InstanceofStub::right() { return no_reg; }
int CompareStub::MinorKey() {
@@ -3798,14 +3783,15 @@ void StringCharAtGenerator::GenerateSlow(
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
+ if (flags_ == NO_STRING_ADD_FLAGS) {
Condition is_smi;
is_smi = masm->CheckSmi(rax);
__ j(is_smi, &string_add_runtime);
@@ -3817,6 +3803,20 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(is_smi, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
}
// Both arguments are strings.
@@ -3844,14 +3844,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx: length of first string
// rcx: length of second string
// rdx: second string
- // r8: map of first string if string check was performed above
- // r9: map of second string if string check was performed above
+ // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
+ // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
// If arguments where known to be strings, maps are not loaded to r8 and r9
// by the code above.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
@@ -4037,6 +4037,54 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
+ __ j(below, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ false,
+ &not_cached);
+ __ movq(arg, scratch1);
+ __ movq(Operand(rsp, stack_offset), arg);
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
+ __ j(not_equal, slow);
+ __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ j(zero, slow);
+ __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
+ __ movq(Operand(rsp, stack_offset), arg);
+
+ __ bind(&done);
}
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 119b69930..1e6fc6514 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -360,24 +360,35 @@ class StringHelper : public AllStatic {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 150ed664b..c07bcf904 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -7239,8 +7239,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
@@ -7249,7 +7249,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 3);
frame_->Push(&answer);
- return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to delete from the context holding the named
@@ -7260,13 +7259,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(variable->name());
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
- return;
+ } else {
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
}
-
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
-
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 7d6e6d852..595dedc47 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -358,14 +358,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
if (FLAG_trace_deopt) {
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index a28bcb79f..8711f4238 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -3075,8 +3075,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
- // so this code can only be reached in non-strict mode.
- ASSERT(strict_mode_flag() == kNonStrictMode);
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
@@ -3114,16 +3114,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index e1ebb3eac..e6904b4da 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -37,6 +37,37 @@ namespace v8 {
namespace internal {
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator : public PostCallGenerator {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ int deoptimization_index,
+ bool ensure_reloc_space = false)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deoptimization_index_(deoptimization_index),
+ ensure_reloc_space_(ensure_reloc_space) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void Generate() {
+ // Ensure that we have enough space in the reloc info to patch
+ // this with calls when doing deoptimization.
+ if (ensure_reloc_space_) {
+ codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ }
+ codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ }
+
+ private:
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ int deoptimization_index_;
+ bool ensure_reloc_space_;
+};
+
+
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -132,6 +163,45 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -252,8 +322,7 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
- Representation r = chunk_->LookupLiteralRepresentation(op);
- ASSERT(r.IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return literal;
}
@@ -539,6 +608,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -652,7 +727,42 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
- Abort("Unimplemented: %s", "DoModI");
+ LOperand* right = instr->InputAt(1);
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+
+ Register right_reg = ToRegister(right);
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Sign extend eax to edx. (We are using only the low 32 bits of the values.)
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel positive_left;
+ NearLabel done;
+ __ testl(rax, rax);
+ __ j(not_sign, &positive_left);
+ __ idivl(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ testl(rdx, rdx);
+ __ j(not_zero, &done);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idivl(right_reg);
+ __ bind(&done);
+ } else {
+ __ idivl(right_reg);
+ }
}
@@ -929,7 +1039,19 @@ void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Abort("Unimplemented: %s", "DoValueOf");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ NearLabel done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done);
+
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+ __ movq(result, FieldOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
}
@@ -1599,7 +1721,18 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- Abort("Unimplemented: %s", "DoInstanceOf");
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(zero, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
@@ -1607,7 +1740,9 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ testq(rax, rax);
EmitBranch(true_block, false_block, zero);
@@ -1615,13 +1750,65 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal");
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_);
+ }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label false_result;
+ Register object = ToRegister(instr->InputAt(0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // Null is not an instance of anything.
+ __ CompareRoot(object, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // String values are not instances of anything.
+ __ JumpIfNotString(object, kScratchRegister, deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+
+ __ bind(deferred->exit());
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl");
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ __ PushSafepointRegisters();
+
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+
+ __ push(ToRegister(instr->InputAt(0)));
+ __ Push(instr->function());
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ testq(kScratchRegister, kScratchRegister);
+ Label load_false;
+ Label done;
+ __ j(not_zero, &load_false);
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&load_false);
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1718,7 +1905,20 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Abort("Unimplemented: %s", "DoLoadContextSlot");
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ movq(ContextOperand(context, instr->slot_index()), value);
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ __ RecordWrite(context, offset, value, kScratchRegister);
+ }
}
@@ -1861,7 +2061,11 @@ void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rax));
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -1915,25 +2119,77 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Abort("Unimplemented: %s", "DoApplyArguments");
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(rax)); // Used for parameter count.
+ ASSERT(function.is(rdi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ // If the receiver is null or undefined, we have to pass the global object
+ // as a receiver.
+ NearLabel global_object, receiver_ok;
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, &global_object);
+ __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &global_object);
+
+ // The receiver should be a JS object.
+ Condition is_smi = __ CheckSmi(receiver);
+ DeoptimizeIf(is_smi, instr->environment());
+ __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ DeoptimizeIf(below, instr->environment());
+ __ jmp(&receiver_ok);
+
+ __ bind(&global_object);
+ // TODO(kmillikin): We have a hydrogen value for the global object. See
+ // if it's better to use it than to explicitly fetch it from the context
+ // here.
+ __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ bind(&receiver_ok);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmpq(length, Immediate(kArgumentsLimit));
+ DeoptimizeIf(above, instr->environment());
+
+ __ push(receiver);
+ __ movq(receiver, length);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ NearLabel invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ testl(length, length);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ __ decl(length);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ v8::internal::ParameterCount actual(rax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(argument);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else {
- ASSERT(r.IsTagged());
- __ Push(literal);
- }
+ EmitPushConstantOperand(argument);
} else if (argument->IsRegister()) {
__ push(ToRegister(argument));
} else {
@@ -1949,6 +2205,15 @@ void LCodeGen::DoContext(LContext* instr) {
}
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result,
+ Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
@@ -2016,22 +2281,76 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathFloor");
+ XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathRound");
+ const XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+
+ // xmm_scratch = 0.5
+ __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+
+ // input = input + 0.5
+ __ addsd(input_reg, xmm_scratch);
+
+ // We need to return -0 for the input range [-0.5, 0[, otherwise
+ // compute Math.floor(value + 0.5).
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ // If we don't need to bailout on -0, we check only bailout
+ // on negative inputs.
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Compute Math.floor(value + 0.5).
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSqrt");
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathPowHalf");
+ XMMRegister xmm_scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ xorpd(xmm_scratch, xmm_scratch);
+ __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
+ __ sqrtsd(input_reg, input_reg);
}
@@ -2056,12 +2375,45 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoUnaryMathOperation");
+ switch (instr->op()) {
+ case kMathAbs:
+ DoMathAbs(instr);
+ break;
+ case kMathFloor:
+ DoMathFloor(instr);
+ break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
+ case kMathSqrt:
+ DoMathSqrt(instr);
+ break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- Abort("Unimplemented: %s", "DoCallKeyed");
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2077,7 +2429,13 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- Abort("Unimplemented: %s", "DoCallFunction");
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1);
}
@@ -2144,7 +2502,30 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register value = ToRegister(instr->value());
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, value); // 1 if negative, 0 if positive.
+ __ decb(value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movb(Operand(external_pointer, key, times_1, 0), value);
}
@@ -2190,7 +2571,152 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ NearLabel flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-sequential strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle cons strings and go to deferred code for the rest.
+ __ testb(result, Immediate(kIsConsStringMask));
+ __ j(zero, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, deferred->entry());
+
+ // Check for ASCII or two-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxwl(result,
+ FieldOperand(string,
+ SeqTwoByteString::kHeaderSize +
+ (kUC16Size * const_index)));
+ } else {
+ __ movzxwl(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxbl(result, FieldOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ movzxbl(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Push(Smi::FromInt(const_index));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ Integer32ToSmi(index, index);
+ __ push(index);
+ }
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(rax);
+ }
+ __ SmiToInteger32(rax, rax);
+ __ StoreToSafepointRegisterSlot(result, rax);
+ __ PopSafepointRegisters();
}
@@ -2403,7 +2929,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("Unimplemented: %s", "DoDoubleToI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ __ cvttsd2siq(result_reg, input_reg);
+ __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
+ __ cmpl(result_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ cvttsd2si(result_reg, input_reg);
+ __ cvtlsi2sd(xmm0, result_reg);
+ __ ucomisd(xmm0, input_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // deoptimize.
+ __ andl(result_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ bind(&done);
+ }
+ }
}
@@ -2552,7 +3113,54 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Abort("Unimplemented: %s", "DoRegExpLiteral");
+ NearLabel materialized;
+ // Registers will be used as follows:
+ // rdi = JS function.
+ // rcx = literals array.
+ // rbx = regexp literal.
+ // rax = regexp literal clone.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset = FixedArray::kHeaderSize +
+ instr->hydrogen()->literal_index() * kPointerSize;
+ __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in rax.
+ __ push(rcx);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->pattern());
+ __ Push(instr->hydrogen()->flags());
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ movq(rbx, rax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(rbx);
+ __ Push(Smi::FromInt(size));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(rbx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ movq(rdx, FieldOperand(rbx, i));
+ __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movq(FieldOperand(rax, i), rdx);
+ __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ }
}
@@ -2575,60 +3183,56 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- Abort("Unimplemented: %s", "DoTypeof");
+ LOperand* input = instr->InputAt(0);
+ if (input->IsConstantOperand()) {
+ __ Push(ToHandle(LConstantOperand::cast(input)));
+ } else if (input->IsRegister()) {
+ __ push(ToRegister(input));
+ } else {
+ ASSERT(input->IsStackSlot());
+ __ push(ToOperand(input));
+ }
+ CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Abort("Unimplemented: %s", "DoTypeofIs");
-}
-
-
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- NearLabel true_label;
- NearLabel false_label;
+ Label true_label;
+ Label false_label;
NearLabel done;
- EmitIsConstructCall(result);
- __ j(equal, &true_label);
-
+ Condition final_branch_condition = EmitTypeofIs(&true_label,
+ &false_label,
+ input,
+ instr->type_literal());
+ __ j(final_branch_condition, &true_label);
+ __ bind(&false_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
-
__ bind(&done);
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- NearLabel check_frame_marker;
- __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
+ ASSERT(operand->IsConstantOperand());
+ LConstantOperand* const_op = LConstantOperand::cast(operand);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ push(Immediate(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else {
+ ASSERT(r.IsTagged());
+ __ Push(literal);
+ }
}
@@ -2712,6 +3316,54 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
+void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
+ Register result = ToRegister(instr->result());
+ NearLabel true_label;
+ NearLabel false_label;
+ NearLabel done;
+
+ EmitIsConstructCall(result);
+ __ j(equal, &true_label);
+
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+ Register temp = ToRegister(instr->TempAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ EmitIsConstructCall(temp);
+ EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp) {
+ // Get the frame pointer for the calling frame.
+ __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ NearLabel check_frame_marker;
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
+}
+
+
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -2724,7 +3376,36 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Abort("Unimplemented: %s", "DoDeleteProperty");
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ // Push object.
+ if (obj->IsRegister()) {
+ __ push(ToRegister(obj));
+ } else {
+ __ push(ToOperand(obj));
+ }
+ // Push key.
+ if (key->IsConstantOperand()) {
+ EmitPushConstantOperand(key);
+ } else if (key->IsRegister()) {
+ __ push(ToRegister(key));
+ } else {
+ __ push(ToOperand(key));
+ }
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ // Create safepoint generator that will also ensure enough space in the
+ // reloc info for patching in deoptimization (since this is invoking a
+ // builtin)
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 6f8f06e34..52409f207 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -90,8 +90,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -117,6 +117,10 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
+ int strict_mode_flag() const {
+ return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ }
+
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -197,6 +201,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
@@ -225,6 +230,9 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
+ // Emits code for pushing a constant operand.
+ void EmitPushConstantOperand(LOperand* operand);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index fba29a69e..2ed109d13 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -296,8 +296,15 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) {
}
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
+ stream->Add("[rcx] #%d / ", arity());
}
@@ -398,7 +405,7 @@ void LChunk::MarkEmptyBlocks() {
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -407,7 +414,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1081,9 +1106,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(
- UseFixed(instance_of->left(), InstanceofStub::left()),
- UseFixed(instance_of->right(), InstanceofStub::right()));
+ new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
+ UseFixed(instance_of->right(), rdx));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1124,21 +1148,32 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- Abort("Unimplemented: %s", "DoInstanceOf");
- return NULL;
+ LOperand* left = UseFixed(instr->left(), rax);
+ LOperand* right = UseFixed(instr->right(), rdx);
+ LInstanceOf* result = new LInstanceOf(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
- return NULL;
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseRegisterAtStart(instr->value()));
+ MarkAsSaveDoubles(result);
+ return AssignEnvironment(AssignPointerMap(DefineFixed(result, rax)));
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- Abort("Unimplemented: %s", "DoApplyArguments");
- return NULL;
+ LOperand* function = UseFixed(instr->function(), rdi);
+ LOperand* receiver = UseFixed(instr->receiver(), rax);
+ LOperand* length = UseFixed(instr->length(), rbx);
+ LOperand* elements = UseFixed(instr->elements(), rcx);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1155,8 +1190,8 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- Abort("Unimplemented: DoOuterContext");
- return NULL;
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
}
@@ -1178,14 +1213,39 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoUnaryMathOperation");
- return NULL;
+ BuiltinFunctionId op = instr->op();
+ if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ case kMathFloor:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathSqrt:
+ return DefineSameAsFirst(result);
+ case kMathPowHalf:
+ return DefineSameAsFirst(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- Abort("Unimplemented: %s", "DoCallKeyed");
- return NULL;
+ ASSERT(instr->key()->representation().IsTagged());
+ LOperand* key = UseFixed(instr->key(), rcx);
+ argument_count_ -= instr->argument_count();
+ LCallKeyed* result = new LCallKeyed(key);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1216,8 +1276,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- Abort("Unimplemented: %s", "DoCallFunction");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ LCallFunction* result = new LCallFunction();
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1285,8 +1346,32 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- Abort("Unimplemented: %s", "DoMod");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into edx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* value = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LModI* mod = new LModI(value, divisor, temp);
+ LInstruction* result = DefineFixed(mod, rdx);
+ return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->representation().IsTagged()) {
+ return DoArithmeticT(Token::MOD, instr);
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double modulo. It can't trigger a GC.
+ // We need to use fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ LOperand* left = UseFixedDouble(instr->left(), xmm1);
+ LOperand* right = UseFixedDouble(instr->right(), xmm2);
+ LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ }
}
@@ -1461,8 +1546,9 @@ LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- Abort("Unimplemented: %s", "DoValueOf");
- return NULL;
+ LOperand* object = UseRegister(instr->value());
+ LValueOf* result = new LValueOf(object);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1519,12 +1605,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
@@ -1622,14 +1704,23 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- Abort("Unimplemented: %s", "DoLoadContextSlot");
- return NULL;
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
}
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- Abort("Unimplemented: DoStoreContextSlot");
- return NULL;
+ Abort("Unimplemented: DoStoreContextSlot"); // Temporarily disabled (whesse).
+ LOperand* context;
+ LOperand* value;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ }
+ return new LStoreContextSlot(context, value);
}
@@ -1692,8 +1783,11 @@ LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* key = UseFixed(instr->key(), rax);
+
+ LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1716,9 +1810,31 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegister(instr->key());
+
+ return new LStorePixelArrayElement(external_pointer, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* key = UseFixed(instr->key(), rcx);
+ LOperand* value = UseFixed(instr->value(), rax);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
+ return MarkAsCall(result, instr);
}
@@ -1743,14 +1859,19 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* value = UseFixed(instr->value(), rax);
+
+ LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- Abort("Unimplemented: %s", "DoStringCharCodeAt");
- return NULL;
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@@ -1771,8 +1892,7 @@ LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- Abort("Unimplemented: %s", "DoRegExpLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
}
@@ -1782,14 +1902,16 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- Abort("Unimplemented: %s", "DoDeleteProperty");
- return NULL;
+ LDeleteProperty* result =
+ new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- Abort("Unimplemented: %s", "DoOsrEntry");
- return NULL;
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new LOsrEntry);
}
@@ -1800,8 +1922,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- Abort("Unimplemented: %s", "DoUnknownOSRValue");
- return NULL;
+ int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
+ return DefineAsSpilled(new LUnknownOSRValue, spill_index);
}
@@ -1812,7 +1934,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- Abort("Unimplemented: %s", "DoArgumentsObject");
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
@@ -1827,14 +1952,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- Abort("Unimplemented: %s", "DoTypeof");
- return NULL;
+ LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- Abort("Unimplemented: %s", "DoTypeofIs");
- return NULL;
+ return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index abffe50b1..fed5b8cb8 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -131,6 +129,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
@@ -141,11 +140,14 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StorePixelArrayElement) \
+ V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@@ -830,11 +832,10 @@ class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ explicit LInstanceOfKnownGlobal(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1005,11 +1006,10 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
- LValueOf(LOperand* value, LOperand* temp) {
+ explicit LValueOf(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
@@ -1246,6 +1246,25 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+ int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1262,6 +1281,18 @@ class LContext: public LTemplateInstruction<1, 0, 0> {
};
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1295,6 +1326,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ LOperand* key() { return inputs_[0]; }
+
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1315,6 +1348,8 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> {
class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
+ LCallFunction() {}
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1403,11 +1438,10 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1468,34 +1502,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LStoreNamedField: public LStoreNamed {
- public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
- : LStoreNamed(obj, val) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+ Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1503,25 +1526,35 @@ class LStoreNamedField: public LStoreNamed {
};
-class LStoreNamedGeneric: public LStoreNamed {
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
+ LStoreNamedGeneric(LOperand* object, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ Handle<Object> name() const { return hydrogen()->name(); }
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1531,23 +1564,56 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
+ LStorePixelArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+ DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
+ "store-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
-class LStoreKeyedGeneric: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e7d02d200..48e42c85c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -623,7 +623,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -632,7 +634,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag);
+ InvokeCode(rdx, expected, expected, flag, post_call_generator);
}
@@ -1444,15 +1446,17 @@ void MacroAssembler::Pushad() {
// r15 is kSmiConstantRegister
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
- lea(rsp, Operand(rsp,
- -(kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
+ int sp_delta =
+ (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ lea(rsp, Operand(rsp, -sp_delta));
}
void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
- lea(rsp, Operand(rsp,
- (kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
+ int sp_delta =
+ (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ lea(rsp, Operand(rsp, sp_delta));
pop(r14);
pop(r12);
pop(r11);
@@ -1494,6 +1498,16 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
};
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
+ movq(SafepointRegisterSlot(dst), src);
+}
+
+
+Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -1835,11 +1849,19 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
NearLabel done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected,
+ actual,
+ Handle<Code>::null(),
+ code,
+ &done,
+ flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
call(code);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
jmp(code);
@@ -1852,12 +1874,20 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
NearLabel done;
Register dummy = rax;
- InvokePrologue(expected, actual, code, dummy, &done, flag);
+ InvokePrologue(expected,
+ actual,
+ code,
+ dummy,
+ &done,
+ flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code, rmode);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code, rmode);
@@ -1868,7 +1898,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -1879,13 +1910,14 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag);
+ InvokeCode(rdx, expected, actual, flag, post_call_generator);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -1896,12 +1928,17 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// the Code object every time we call the function.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag);
+ InvokeCode(rdx, expected, actual, flag, post_call_generator);
} else {
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ InvokeCode(code,
+ expected,
+ actual,
+ RelocInfo::CODE_TARGET,
+ flag,
+ post_call_generator);
}
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 835251832..4c5c60c8f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -58,6 +58,7 @@ typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
+class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@@ -170,10 +171,9 @@ class MacroAssembler: public Assembler {
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
void PopSafepointRegisters() { Popad(); }
- static int SafepointRegisterStackIndex(int reg_code) {
- return kNumSafepointRegisters - 1 -
- kSafepointPushRegisterIndices[reg_code];
- }
+ // Store the value in register src in the safepoint register stack
+ // slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, Register src);
// ---------------------------------------------------------------------------
@@ -183,27 +183,33 @@ class MacroAssembler: public Assembler {
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -996,7 +1002,8 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_register,
LabelType* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1027,6 +1034,17 @@ class MacroAssembler: public Assembler {
Object* PopHandleScopeHelper(Register saved,
Register scratch,
bool gc_allowed);
+
+
+ // Compute memory operands for safepoint stack slots.
+ Operand SafepointRegisterSlot(Register reg);
+ static int SafepointRegisterStackIndex(int reg_code) {
+ return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
+ }
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
};
@@ -1050,6 +1068,17 @@ class CodePatcher {
};
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+ PostCallGenerator() { }
+ virtual ~PostCallGenerator() { }
+ virtual void Generate() = 0;
+};
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1756,7 +1785,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> code_constant,
Register code_register,
LabelType* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
NearLabel invoke;
if (expected.is_immediate()) {
@@ -1807,6 +1837,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (flag == CALL_FUNCTION) {
Call(adaptor, RelocInfo::CODE_TARGET);
+ if (post_call_generator != NULL) post_call_generator->Generate();
jmp(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 973fece32..c27e1b8c4 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -2060,8 +2060,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@@ -2076,8 +2077,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2096,8 +2098,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;