Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc               |  11
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc             | 112
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                |  18
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc                  |   8
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc            |  16
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc           | 254
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                     | 164
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc        | 174
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h         |   8
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc                |  70
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h                 |  63
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc |  63
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h  |   8
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc             | 333
14 files changed, 506 insertions(+), 796 deletions(-)
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index a36763db2..a5d42cfbe 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -831,7 +831,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
- __ mov(ecx, Operand(ebp, kIndexOffset));
+ __ mov(eax, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
@@ -848,17 +848,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(eax);
// Update the index on the stack and in register eax.
- __ mov(ecx, Operand(ebp, kIndexOffset));
- __ add(ecx, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), ecx);
+ __ mov(eax, Operand(ebp, kIndexOffset));
+ __ add(eax, Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, kIndexOffset), eax);
__ bind(&entry);
- __ cmp(ecx, Operand(ebp, kLimitOffset));
+ __ cmp(eax, Operand(ebp, kLimitOffset));
__ j(not_equal, &loop);
// Invoke the function.
Label call_proxy;
- __ mov(eax, ecx);
ParameterCount actual(eax);
__ SmiUntag(eax);
__ mov(edi, Operand(ebp, kFunctionOffset));
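
The rewritten loop keeps the smi-tagged index in eax for the whole iteration, which is what lets the final mov(eax, ecx) before the invoke be dropped. A minimal standalone sketch of the smi arithmetic the increment relies on (assuming kSmiTagSize == 1, as on ia32; names are illustrative):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // ia32: 31-bit smis, one zero tag bit

    constexpr int32_t SmiTag(int32_t value)   { return value << kSmiTagSize; }
    constexpr int32_t SmiUntag(int32_t value) { return value >> kSmiTagSize; }

    int main() {
      int32_t index = SmiTag(41);
      index += 1 << kSmiTagSize;  // the add(eax, Immediate(1 << kSmiTagSize)) step
      assert(SmiUntag(index) == 42);
      return 0;
    }
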
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a1c6edd0f..4faa6a4b2 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -1681,11 +1681,6 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
}
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == BinaryOpIC::INT32);
@@ -1695,37 +1690,31 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
+ case Token::DIV: {
Label not_floats;
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- if (op_ == Token::MOD) {
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- __ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, ecx);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, &not_int32);
- __ j(carry, &not_int32);
- }
- GenerateHeapResultAllocation(masm, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
}
+ // Check result type if it is currently Int32.
+ if (result_type_ <= BinaryOpIC::INT32) {
+ __ cvttsd2si(ecx, Operand(xmm0));
+ __ cvtsi2sd(xmm2, ecx);
+ __ ucomisd(xmm0, xmm2);
+ __ j(not_zero, &not_int32);
+ __ j(carry, &not_int32);
+ }
+ GenerateHeapResultAllocation(masm, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
FloatingPointHelper::LoadFloatOperands(
@@ -1733,28 +1722,20 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ecx,
FloatingPointHelper::ARGS_IN_REGISTERS);
FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
}
+ Label after_alloc_failure;
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ __ bind(&after_alloc_failure);
+ __ ffree();
+ __ jmp(&call_runtime);
}
__ bind(&not_floats);
@@ -1763,6 +1744,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
break;
}
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
@@ -1773,6 +1758,11 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_floats;
Label not_int32;
Label non_smi_result;
+ /* {
+ CpuFeatures::Scope use_sse2(SSE2);
+ FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+ }*/
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
use_sse3_,
&not_floats);
@@ -1843,8 +1833,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE(); break;
}
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
+ // If an allocation fails, or SHR or MOD hit a hard case,
+ // use the runtime system to get the correct result.
__ bind(&call_runtime);
switch (op_) {
@@ -1865,6 +1855,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
@@ -1965,7 +1957,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
+ __ ffree();
__ jmp(&call_runtime);
}
@@ -2169,8 +2161,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
+ __ ffree();
+ __ jmp(&call_runtime);
}
__ bind(&not_floats);
break;
@@ -5014,9 +5006,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ jmp(&invoke, Label::kNear);
+ Label cont;
+ __ jmp(&cont, Label::kNear);
__ bind(&not_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@@ -6168,11 +6162,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
+ __ j(not_equal, &not_original_string, Label::kNear);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
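
The SubStringStub hunk replaces the three-way comparison with a single equality test: the original string is returned only when the requested length exactly equals the string's length, and every other case, including the formerly rejected unsafe lengths, continues down the substring path. A simplified sketch of that control flow (bounds handling elided, names hypothetical):

    #include <cstddef>
    #include <string>

    std::string SubStringFastPath(const std::string& s,
                                  std::size_t from, std::size_t to) {
      std::size_t length = to - from;
      if (length == s.size()) return s;  // the single not_equal test
      return s.substr(from, length);     // &not_original_string path
    }
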
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index cff6454ff..ea6191032 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -397,25 +397,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- __ lea(esi, Operand(edi,
- times_4,
- FixedDoubleArray::kHeaderSize + kPointerSize));
+ __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
__ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
- Label aligned, aligned_done;
- __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
- __ j(zero, &aligned, Label::kNear);
- __ mov(FieldOperand(eax, 0),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
- __ add(eax, Immediate(kPointerSize));
- __ jmp(&aligned_done);
-
- __ bind(&aligned);
- __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-
- __ bind(&aligned_done);
-
// eax: destination FixedDoubleArray
// edi: number of elements
// edx: receiver
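
The times_4 scale in the retained lea is not arbitrary: the length in edi is smi-tagged, i.e. already shifted left by one, and each FixedDoubleArray element occupies eight bytes, so scaling the tagged value by four produces length * 8. A small sketch of the arithmetic (the header-size constant is a stand-in, not the real FixedDoubleArray::kHeaderSize):

    #include <cassert>

    constexpr int kSmiTagSizeSketch = 1;  // smi tag width on ia32
    constexpr int kDoubleSize = 8;        // bytes per double element
    constexpr int kHeaderSizeSketch = 8;  // stand-in for the real header size

    // Tagged length is length << 1, so scaling it by 4 (times_4) yields
    // length * 8, exactly one double per element, plus the array header.
    int DoubleArrayAllocationSize(int length) {
      int smi_length = length << kSmiTagSizeSketch;
      return smi_length * 4 + kHeaderSizeSketch;
    }

    int main() {
      assert(DoubleArrayAllocationSize(3) == 3 * kDoubleSize + kHeaderSizeSketch);
      return 0;
    }
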
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 710cbaf19..d13fa759c 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -172,10 +172,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
}
@@ -194,10 +194,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
- // -- ecx : key
// -- edx : receiver
+ // -- eax : key
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
}
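
Both debug-break helpers pass the live registers as a bit mask built with Register::bit(). A standalone sketch of that convention (the register codes are assumptions, for illustration only):

    #include <cstdint>

    typedef uint32_t RegList;

    // Each register contributes one bit to the mask of live registers that
    // Generate_DebugBreakCallHelper must preserve across the break.
    struct Register {
      int code;  // assumed ia32-style encoding
      constexpr RegList bit() const { return RegList(1) << code; }
    };

    constexpr Register eax{0}, ecx{1}, edx{2};

    // Load IC break site: receiver in eax, name in ecx.
    constexpr RegList kLoadICLiveRegs = eax.bit() | ecx.bit();
    // Keyed load IC break site: receiver in edx, key in eax.
    constexpr RegList kKeyedLoadICLiveRegs = eax.bit() | edx.bit();
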
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 73961e141..6de2c81b9 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// ok:
if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
+ ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
} else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
+ ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
+ ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
}
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -266,9 +266,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
+ ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
+ ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
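
Swapping the ASSERT_EQ operands changes nothing about what is verified, only how a failure message reads when the macro labels its arguments. A sketch of a macro in that style (the labels are an assumption, not V8's actual output):

    #include <cstdio>
    #include <cstdlib>

    // Illustrative only: an EQ assert whose message labels the first argument
    // "expected" and the second "found"; the check itself is symmetric.
    #define ASSERT_EQ_SKETCH(expected, found)                                \
      do {                                                                   \
        if ((expected) != (found)) {                                         \
          std::fprintf(stderr, "ASSERT_EQ failed: expected %d, found %d\n",  \
                       static_cast<int>(expected), static_cast<int>(found)); \
          std::abort();                                                      \
        }                                                                    \
      } while (false)
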
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 266afce20..cf16c5b6e 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -101,6 +101,13 @@ class JumpPatchSite BASE_EMBEDDED {
};
+// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
+int FullCodeGenerator::self_optimization_header_size() {
+ UNREACHABLE();
+ return 13;
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -262,11 +269,11 @@ void FullCodeGenerator::Generate() {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
+ VariableProxy* proxy = scope()->function();
+ ASSERT(proxy->var()->mode() == CONST ||
+ proxy->var()->mode() == CONST_HARMONY);
+ ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
+ EmitDeclaration(proxy, proxy->var()->mode(), NULL);
}
VisitDeclarations(scope()->declarations());
}
@@ -756,51 +763,60 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
}
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
- // Check that we're not inside a with or catch context.
- __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
- __ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, "Declaration in with context.");
- __ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
+void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
+ VariableMode mode,
+ FunctionLiteral* function) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool binding_needs_init = (function == NULL) &&
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (variable->location()) {
case Variable::UNALLOCATED:
- globals_->Add(variable->name());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value());
+ ++global_count_;
break;
case Variable::PARAMETER:
case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
+ if (function != NULL) {
+ Comment cmnt(masm_, "[ Declaration");
+ VisitForAccumulatorValue(function);
+ __ mov(StackOperand(variable), result_register());
+ } else if (binding_needs_init) {
+ Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
}
break;
case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ // The variable in the decl always resides in the current function
+ // context.
+ ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (FLAG_debug_code) {
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, "Declaration in with context.");
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, "Declaration in catch context.");
+ }
+ if (function != NULL) {
+ Comment cmnt(masm_, "[ Declaration");
+ VisitForAccumulatorValue(function);
+ __ mov(ContextOperand(esi, variable->index()), result_register());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ result_register(),
+ ecx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ } else if (binding_needs_init) {
+ Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
@@ -809,12 +825,14 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
+ Comment cmnt(masm_, "[ Declaration");
__ push(esi);
__ push(Immediate(variable->name()));
- // VariableDeclaration nodes are always introduced in one of four modes.
- ASSERT(mode == VAR || mode == LET ||
- mode == CONST || mode == CONST_HARMONY);
+ // Declaration nodes are always introduced in one of four modes.
+ ASSERT(mode == VAR ||
+ mode == CONST ||
+ mode == CONST_HARMONY ||
+ mode == LET);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
@@ -822,7 +840,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
- if (hole_init) {
+ if (function != NULL) {
+ VisitForStackValue(function);
+ } else if (binding_needs_init) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
@@ -834,118 +854,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ mov(StackOperand(variable), result_register());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ mov(ContextOperand(esi, variable->index()), result_register());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- result_register(),
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(esi);
- __ push(Immediate(variable->name()));
- __ push(Immediate(Smi::FromInt(NONE)));
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
-
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name());
- globals_->Add(instance);
- Visit(declaration->module());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
- Visit(declaration->module());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
@@ -1286,7 +1194,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ mov(edx, GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1370,7 +1278,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
- __ mov(edx, GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1764,9 +1672,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
- // We need the receiver both on the stack and in edx.
- VisitForStackValue(property->obj());
- __ mov(edx, Operand(esp, 0));
+ // We need the receiver both on the stack and in the accumulator.
+ VisitForAccumulatorValue(property->obj());
+ __ push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1774,9 +1682,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
+ VisitForAccumulatorValue(property->key());
+ __ mov(edx, Operand(esp, 0));
+ __ push(eax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -2019,7 +1927,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
- __ pop(edx); // Receiver.
+ __ pop(edx);
__ pop(eax); // Restore value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2125,9 +2033,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
- // eax : value
- // esp[0] : receiver
-
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
@@ -2170,9 +2075,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // eax : value
- // esp[0] : key
- // esp[kPointerSize] : receiver
// If the assignment starts a block of assignments to the same object,
// change to slow case to avoid the quadratic behavior of repeatedly
@@ -2185,7 +2087,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}
- __ pop(ecx); // Key.
+ __ pop(ecx);
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
@@ -2218,14 +2120,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
- __ mov(edx, result_register());
EmitNamedPropertyLoad(expr);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(edx); // Object.
- __ mov(ecx, result_register()); // Key.
+ __ pop(edx);
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -4024,16 +3924,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(Immediate(Smi::FromInt(0)));
}
if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in edx.
+ // Put the object both on the stack and in the accumulator.
VisitForAccumulatorValue(prop->obj());
__ push(eax);
- __ mov(edx, eax);
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
+ VisitForAccumulatorValue(prop->key());
+ __ mov(edx, Operand(esp, 0));
+ __ push(eax);
EmitKeyedPropertyLoad(prop);
}
}
@@ -4180,7 +4079,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ mov(edx, GlobalObjectOperand());
+ __ mov(eax, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4445,8 +4344,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
+ if (declaration_scope->is_global_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
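
The merged EmitDeclaration decides on an initial value with one predicate: a binding is hole-initialized only when it does not declare a function and its mode requires an uninitialized start. Extracted as a standalone sketch:

    // Enumerators abbreviated from V8's VariableMode; sketch only.
    enum VariableMode { VAR, LET, CONST, CONST_HARMONY };

    // Mirrors the binding_needs_init computation above: function declarations
    // get the function value, and only const/let bindings without a function
    // initializer start out holding the hole.
    bool BindingNeedsInit(VariableMode mode, bool declares_function) {
      return !declares_function &&
             (mode == CONST || mode == CONST_HARMONY || mode == LET);
    }
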
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index a591af125..eac273952 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -218,13 +218,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
+ StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -233,13 +233,13 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm,
bool support_wrappers) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
+ StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -248,13 +248,13 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm,
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -443,7 +443,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -451,34 +451,39 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
+ __ JumpIfNotSmi(eax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
+ masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
+ __ CheckFastElements(ecx, &check_number_dictionary);
+
+ GenerateFastArrayLoad(masm,
+ edx,
+ eax,
+ ecx,
+ eax,
+ NULL,
+ &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
-
__ bind(&check_number_dictionary);
- __ mov(ebx, ecx);
+ __ mov(ebx, eax);
__ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check whether the elements is a number dictionary.
// edx: receiver
// ebx: untagged index
- // ecx: key
- // eax: elements
- __ CheckMap(eax,
+ // eax: key
+ // ecx: elements
+ __ CheckMap(ecx,
isolate->factory()->hash_table_map(),
&slow,
DONT_DO_SMI_CHECK);
@@ -486,7 +491,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
+ __ LoadFromNumberDictionary(&slow_pop_receiver,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
@@ -498,15 +509,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
// Slow case: jump to runtime.
// edx: receiver
- // ecx: key
+ // eax: key
__ IncrementCounter(counters->keyed_load_generic_slow(), 1);
GenerateRuntimeGetProperty(masm);
__ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
+ GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &slow);
+ masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
@@ -515,18 +526,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Immediate(isolate->factory()->hash_table_map()));
__ j(equal, &probe_dictionary);
- // The receiver's map is still in eax, compute the keyed lookup cache hash
+ // Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, "Map is no longer in eax.");
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ecx, ebx);
+ __ shr(ecx, KeyedLookupCache::kMapHashShift);
+ __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+ __ xor_(ecx, edi);
+ __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
@@ -538,7 +546,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ mov(edi, eax);
+ __ mov(edi, ecx);
__ shl(edi, kPointerSizeLog2 + 1);
if (i != 0) {
__ add(edi, Immediate(kPointerSize * i * 2));
@@ -546,25 +554,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &try_next_entry);
__ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
- __ lea(edi, Operand(eax, 1));
+ __ lea(edi, Operand(ecx, 1));
__ shl(edi, kPointerSizeLog2 + 1);
__ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
// Get field offset.
// edx : receiver
// ebx : receiver's map
- // ecx : key
- // eax : lookup cache index
+ // eax : key
+ // ecx : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
@@ -572,12 +580,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
if (i != 0) {
- __ add(eax, Immediate(i));
+ __ add(ecx, Immediate(i));
}
__ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
+ Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, ecx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -586,9 +594,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(ecx, edi);
+ __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -604,16 +612,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// exists.
__ bind(&probe_dictionary);
- __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+ __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
- GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
+ GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
__ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
+ __ IndexFromHash(ebx, eax);
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -621,15 +629,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key (index)
+ // -- eax : key (index)
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
Register receiver = edx;
- Register index = ecx;
- Register scratch = ebx;
+ Register index = eax;
+ Register scratch = ecx;
Register result = eax;
StringCharAtGenerator char_at_generator(receiver,
@@ -653,7 +661,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -663,24 +671,24 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpIfSmi(edx, &slow);
// Check that the key is an array index, that is Uint32.
- __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
+ __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
__ j(not_zero, &slow);
// Get the map of the receiver.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
- __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
- __ and_(eax, Immediate(kSlowCaseBitFieldMask));
- __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
- __ pop(eax);
+ __ pop(ecx);
__ push(edx); // receiver
- __ push(ecx); // key
- __ push(eax); // return address
+ __ push(eax); // key
+ __ push(ecx); // return address
// Perform tail call to the entry.
ExternalReference ref =
@@ -695,20 +703,20 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, notin;
Factory* factory = masm->isolate()->factory();
Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
+ GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
__ mov(eax, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
__ cmp(unmapped_location, factory->the_hole_value());
__ j(equal, &slow);
__ mov(eax, unmapped_location);
@@ -1300,15 +1308,15 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- eax);
+ Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
+ edx);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1317,17 +1325,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
+ GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
- // eax: elements
+ // edx: elements
// Search the dictionary placing the result in eax.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
+ GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
__ ret(0);
// Cache miss: Jump to runtime.
@@ -1338,15 +1346,15 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
__ pop(ebx);
- __ push(edx); // receiver
+ __ push(eax); // receiver
__ push(ecx); // name
__ push(ebx); // return address
@@ -1359,7 +1367,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -1368,7 +1376,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ pop(ebx);
__ push(edx); // receiver
- __ push(ecx); // name
+ __ push(eax); // name
__ push(ebx); // return address
// Perform tail call to the entry.
@@ -1382,14 +1390,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx); // receiver
- __ push(ecx); // name
+ __ push(eax); // name
__ push(ebx); // return address
// Perform tail call to the entry.
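
The rewritten cache probe in KeyedLoadIC::GenerateGeneric computes its hash in ecx by mixing the receiver's map pointer with the key string's hash field. A standalone sketch of that computation (shift and mask values are parameters here; the real constants live in KeyedLookupCache and String):

    #include <cstdint>

    // Mixes 32 bits of the receiver's map pointer with the key string's hash
    // field and masks the result down to a cache bucket index.
    uint32_t KeyedLookupCacheHash(uint32_t map_bits, uint32_t hash_field,
                                  int map_hash_shift, int string_hash_shift,
                                  uint32_t mask) {
      uint32_t hash =
          (map_bits >> map_hash_shift) ^ (hash_field >> string_hash_shift);
      return hash & mask;
    }
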
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d416662a1..8fb4c7919 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -2059,9 +2059,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- // Get the deoptimization index of the LLazyBailout-environment that
- // corresponds to this instruction.
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LEnvironment* env = instr->deoptimization_environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value into the eax slot and restore all registers.
@@ -2115,7 +2114,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
+ ASSERT(ToRegister(instr->global_object()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
@@ -2313,7 +2312,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
+ ASSERT(ToRegister(instr->object()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
__ mov(ecx, instr->name());
@@ -2534,7 +2533,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
+ ASSERT(ToRegister(instr->key()).is(eax));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2544,29 +2543,25 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
+ // Check for arguments adapter frame.
+ Label done, adapted;
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(result),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted, Label::kNear);
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ // No arguments adaptor frame.
+ __ mov(result, Operand(ebp));
+ __ jmp(&done, Label::kNear);
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- }
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
}
@@ -2671,7 +2666,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Invoke the function.
__ bind(&invoke);
- ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
@@ -2688,11 +2683,6 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2739,8 +2729,7 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state) {
+ CallKind call_kind) {
bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
function->shared()->formal_parameter_count() == arity;
@@ -2748,9 +2737,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
if (can_invoke_directly) {
- if (edi_state == EDI_UNINITIALIZED) {
- __ LoadHeapObject(edi, function);
- }
+ __ LoadHeapObject(edi, function);
// Change context if needed.
bool change_context =
@@ -2793,8 +2780,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
CallKnownFunction(instr->function(),
instr->arity(),
instr,
- CALL_AS_METHOD,
- EDI_UNINITIALIZED);
+ CALL_AS_METHOD);
}
@@ -3240,21 +3226,13 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_CONTAINS_TARGET);
- }
+ ASSERT(instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ RecordPosition(pointers->position());
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(instr->arity());
+ __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
}
@@ -3309,11 +3287,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- EDI_UNINITIALIZED);
+ CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3486,18 +3460,15 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
+ Label have_value;
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -4210,21 +4181,12 @@ void LCodeGen::DoCheckMapCommon(Register reg,
}
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
- __ j(equal, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&success);
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@@ -4335,14 +4297,6 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
deferred->entry(),
TAG_OBJECT);
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(scratch, constructor);
@@ -4377,14 +4331,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ mov(FieldOperand(result, property_offset), scratch);
}
}
+
+ __ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@@ -4392,9 +4346,8 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ __ PushHeapObject(constructor);
+ CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -4462,13 +4415,6 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
__ Assert(equal, "Unexpected object literal boilerplate");
- __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
- __ cmp(ecx, Handle<Map>(object->map()));
- __ Assert(equal, "Unexpected boilerplate map");
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
- __ Assert(equal, "Unexpected boilerplate elements kind");
}
// Only elements backing stores for non-COW arrays need to be copied.
@@ -4538,10 +4484,9 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
}
} else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
@@ -4565,23 +4510,6 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // been converted to FAST_ELEMENTS.
- if (boilerplate_elements_kind != FAST_ELEMENTS) {
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
@@ -4866,7 +4794,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* key = instr->key();
__ push(ToOperand(obj));
EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
// Create safepoint generator that will also ensure enough space in the
@@ -4964,7 +4892,7 @@ void LCodeGen::DoIn(LIn* instr) {
LOperand* key = instr->key();
EmitPushTaggedOperand(key);
EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
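
The DoStoreKeyedFastDoubleElement hunk makes NaN canonicalization unconditional: ucomisd of the value against itself sets the parity flag only for NaN, and the branch skips the canonical-NaN load for ordinary numbers. A C++ sketch of the effect (the canonical bit pattern below is an assumption):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Any NaN written into a fast double array is first replaced by a single
    // canonical bit pattern; ordinary numbers pass through untouched.
    double CanonicalizeNaNForStore(double value) {
      if (std::isnan(value)) {  // corresponds to the ucomisd self-compare
        const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // assumed pattern
        std::memcpy(&value, &kCanonicalNaN, sizeof value);
      }
      return value;
    }
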
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index a2810f05c..52befc697 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -206,18 +206,12 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
LOperand* context);
- enum EDIState {
- EDI_UNINITIALIZED,
- EDI_CONTAINS_TARGET
- };
-
// Generate a direct call to a known function. Expects the function
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state);
+ CallKind call_kind);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 5adaf431b..186b346c7 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -729,6 +729,22 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
}
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id) {
+ ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ instruction_pending_deoptimization_environment_ = instr;
+ pending_deoptimization_ast_id_ = ast_id;
+ return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+ instruction_pending_deoptimization_environment_ = NULL;
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
@@ -741,10 +757,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
+ instr = SetInstructionPendingDeoptimizationEnvironment(
+ instr, sim->ast_id());
}
// If instruction does not have side-effects lazy deoptimization
@@ -762,6 +776,12 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
}
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ instr->MarkAsSaveDoubles();
+ return instr;
+}
+
+
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1310,7 +1330,6 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
LOperand* input = UseRegisterAtStart(instr->value());
LBitNotI* result = new(zone()) LBitNotI(input);
return DefineSameAsFirst(result);
@@ -1335,12 +1354,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1787,9 +1800,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
}
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMaps* result = new(zone()) LCheckMaps(value);
+ LCheckMap* result = new(zone()) LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1849,7 +1862,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1909,7 +1922,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* obj = UseFixed(instr->object(), edx);
+ LOperand* obj = UseFixed(instr->object(), eax);
LLoadNamedFieldPolymorphic* result =
new(zone()) LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1925,7 +1938,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
+ LOperand* object = UseFixed(instr->object(), eax);
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
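Taken together, the UseFixed() changes in this file move the generic named-load ICs back to taking their object in eax. A compact summary of the convention these hunks imply (inferred from the diff, not an official V8 table):

    enum class Reg { eax, ecx, edx };

    struct LoadIcInputs { Reg object; Reg name_or_key; };

    // LoadIC / LoadNamedGeneric: object back in eax (was edx), name in ecx.
    constexpr LoadIcInputs kNamedLoadIC = {Reg::eax, Reg::ecx};
    // KeyedLoadIC: object stays in edx, key moves back to eax (see below).
    constexpr LoadIcInputs kKeyedLoadIC = {Reg::edx, Reg::eax};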
@@ -2004,7 +2017,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
+ LOperand* key = UseFixed(instr->key(), eax);
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key);
@@ -2335,12 +2348,9 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+ set_deoptimization_environment(result->environment());
+ ClearInstructionPendingDeoptimizationEnvironment();
return result;
}
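The helpers reintroduced above enforce a one-slot protocol: MarkAsCall records the instruction whose lazy-deopt environment is still pending, and the following DoSimulate attaches that environment and clears the slot. A minimal sketch of the invariant (hypothetical types, not the V8 classes):

    #include <cassert>

    struct PendingLazyDeopt {
      const void* instr = nullptr;
      int ast_id = -1;  // stand-in for AstNode::kNoNumber

      // Mirrors SetInstructionPendingDeoptimizationEnvironment: only one
      // call may be pending between a MarkAsCall and its HSimulate.
      void Set(const void* i, int id) {
        assert(instr == nullptr && ast_id == -1);
        instr = i;
        ast_id = id;
      }
      // Mirrors ClearInstructionPendingDeoptimizationEnvironment.
      void Clear() { instr = nullptr; ast_id = -1; }
    };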
@@ -2370,8 +2380,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->call_kind(),
instr->is_construct());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
+ if (instr->arguments() != NULL) {
+ inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
}
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
@@ -2380,20 +2390,10 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (instr->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
- return pop;
+ return NULL;
}
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 09f0b0d72..4ecce96d0 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -65,7 +65,7 @@ class LCodeGen;
V(CallStub) \
V(CheckFunction) \
V(CheckInstanceType) \
- V(CheckMaps) \
+ V(CheckMap) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@@ -174,8 +174,7 @@ class LCodeGen;
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
- V(WrapReceiver) \
- V(Drop)
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -199,7 +198,8 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
- is_call_(false) { }
+ is_call_(false),
+ is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -242,12 +242,22 @@ class LInstruction: public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+ void set_deoptimization_environment(LEnvironment* env) {
+ deoptimization_environment_.set(env);
+ }
+ LEnvironment* deoptimization_environment() const {
+ return deoptimization_environment_.get();
+ }
+ bool HasDeoptimizationEnvironment() const {
+ return deoptimization_environment_.is_set();
+ }
void MarkAsCall() { is_call_ = true; }
+ void MarkAsSaveDoubles() { is_save_doubles_ = true; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
+ bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
@@ -268,7 +278,9 @@ class LInstruction: public ZoneObject {
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
+ SetOncePointer<LEnvironment> deoptimization_environment_;
bool is_call_;
+ bool is_save_doubles_;
};
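The restored deoptimization_environment_ field relies on V8's SetOncePointer utility, whose contract is roughly the following (simplified sketch, not the real implementation):

    #include <cassert>

    template <typename T>
    class SetOncePointer {
     public:
      bool is_set() const { return pointer_ != nullptr; }
      T* get() const { assert(is_set()); return pointer_; }
      void set(T* value) {
        assert(!is_set() && value != nullptr);  // may be set exactly once
        pointer_ = value;
      }
     private:
      T* pointer_ = nullptr;
    };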
@@ -513,8 +525,9 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
+ LArgumentsElements() { }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
@@ -831,15 +844,6 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
};
@@ -1397,19 +1401,6 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1498,7 +1489,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
@@ -1797,8 +1787,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
-
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1961,14 +1949,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
};
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMaps(LOperand* value) {
+ explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+ DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+ DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
@@ -2483,6 +2471,11 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
+
+ LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+ LInstruction* instr, int ast_id);
+ void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 0029f33b1..04d6b62c8 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -501,13 +501,9 @@ void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
+ __ mov(eax, current_character());
+ __ and_(eax, mask);
+ __ cmp(eax, c);
BranchOrBacktrack(equal, on_equal);
}
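Old and new sequences compute the same predicate; the deleted branch only special-cased c == 0, where a single test instruction suffices because (x & mask) == 0 needs no separate compare. In C++ terms:

    #include <cstdint>

    // The predicate behind CheckCharacterAfterAnd (and, negated, behind
    // CheckNotCharacterAfterAnd): match the current character after masking.
    inline bool MatchesAfterAnd(uint32_t current, uint32_t c, uint32_t mask) {
      return (current & mask) == c;  // for c == 0 this is just a zero test
    }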
@@ -515,13 +511,9 @@ void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
+ __ mov(eax, current_character());
+ __ and_(eax, mask);
+ __ cmp(eax, c);
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -533,51 +525,12 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(eax, Operand(current_character(), -minus));
- if (c == 0) {
- __ test(eax, Immediate(mask));
- } else {
- __ and_(eax, mask);
- __ cmp(eax, c);
- }
+ __ and_(eax, mask);
+ __ cmp(eax, c);
BranchOrBacktrack(not_equal, on_not_equal);
}
-void RegExpMacroAssemblerIA32::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(eax, Immediate(table));
- Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
- __ mov(ebx, kTableSize - 1);
- __ and_(ebx, current_character());
- index = ebx;
- }
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
- BranchOrBacktrack(not_equal, on_bit_set);
-}
-
-
bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
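The deleted CheckCharacterInRange/CheckCharacterNotInRange pair used exactly the unsigned trick the comment above describes: one subtraction plus one unsigned comparison covers both bounds at once. Sketch:

    #include <cstdint>

    // Equivalent of the deleted lea/cmp pair: c is in [from, to] iff the
    // wrapped difference c - from is <= to - from as an unsigned number.
    inline bool InRange(uint16_t c, uint16_t from, uint16_t to) {
      return static_cast<uint32_t>(c - from) <= static_cast<uint32_t>(to - from);
    }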
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 78cd06958..d50447028 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -78,14 +78,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
uc16 minus,
uc16 mask,
Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index e148e2f52..eb86b2f52 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -406,7 +406,6 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(receiver);
__ push(holder);
__ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
}
@@ -420,12 +419,12 @@ static void CompileCallLoadPropertyWithInterceptor(
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate()),
- 6);
+ 5);
}
// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
+static const int kFastApiCallArguments = 3;
// Reserves space for the extra arguments to API function in the
@@ -473,11 +472,10 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// -- esp[8] : api function
// (first fast api call extra argument)
// -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : last argument
+ // -- esp[16] : last argument
// -- ...
- // -- esp[(argc + 4) * 4] : first argument
- // -- esp[(argc + 5) * 4] : receiver
+ // -- esp[(argc + 3) * 4] : first argument
+ // -- esp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
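With the isolate slot gone, the reserved block shrinks from four words to three, which is why every later offset in this function drops by one kPointerSize. Illustrative arithmetic for the receiver slot (hypothetical helper, constants as in the comment above):

    constexpr int kPointerSize = 4;           // ia32
    constexpr int kFastApiCallArguments = 3;  // after this change

    // Receiver offset from esp, given argc pushed JS arguments: the JS
    // arguments plus the three reserved fast-API words sit on top of it.
    constexpr int ReceiverOffset(int argc) {
      return (argc + kFastApiCallArguments + 1) * kPointerSize;
    }
    static_assert(ReceiverOffset(0) == 16, "matches esp[(argc + 4) * 4] above");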
@@ -495,11 +493,9 @@ static void GenerateFastApiCall(MacroAssembler* masm,
} else {
__ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
// Prepare arguments.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
+ __ lea(eax, Operand(esp, 3 * kPointerSize));
const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
@@ -683,7 +679,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
- 6);
+ 5);
// Restore the name_ register.
__ pop(name_);
@@ -1038,7 +1034,6 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
} else {
__ push(Immediate(Handle<Object>(callback->data())));
}
- __ push(Immediate(reinterpret_cast<int>(isolate())));
// Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const AccessorInfo& to the C++ callback.
@@ -1049,9 +1044,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
__ push(scratch3); // Restore return address.
- // 4 elements array for v8::Arguments::values_, handler for name and pointer
+ // 3 elements array for v8::Arguments::values_, handler for name and pointer
 // to the values (it is considered a smi by the GC).
- const int kStackSpace = 6;
+ const int kStackSpace = 5;
const int kApiArgc = 2;
__ PrepareCallApiFunction(kApiArgc);
@@ -1218,7 +1213,6 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
__ push(holder_reg);
__ mov(holder_reg, Immediate(callback));
__ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(holder_reg);
__ push(name_reg);
__ push(scratch2); // restore return address
@@ -1226,7 +1220,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
@@ -1242,7 +1236,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
isolate());
- __ TailCallExternalReference(ref, 6, 1);
+ __ TailCallExternalReference(ref, 5, 1);
}
}
@@ -2180,7 +2174,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
name, depth, &miss);
// Move the return address on top of the stack.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
@@ -2709,27 +2703,27 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> last) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
// Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
+ __ JumpIfSmi(eax, &miss);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
// Check the maps of the full prototype chain. Also check that
// global property cells up to (but not including) the last object
// in the prototype chain are empty.
- CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
+ CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
if (last->IsGlobalObject()) {
GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
+ masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
}
// Return undefined if maps of the full prototype chain are still the
@@ -2750,13 +2744,13 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
int index,
Handle<String> name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss);
+ GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2771,13 +2765,13 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<JSObject> holder,
Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, callback,
+ GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2792,13 +2786,13 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
- GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss);
+ GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2811,8 +2805,8 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
@@ -2822,7 +2816,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
+ GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
name, &miss);
__ bind(&miss);
@@ -2840,15 +2834,15 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<String> name,
bool is_dont_delete) {
// ----------- S t a t e -------------
+ // -- eax : receiver
// -- ecx : name
- // -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
// Check that the maps haven't changed.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
+ __ JumpIfSmi(eax, &miss);
+ CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
@@ -2886,7 +2880,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> holder,
int index) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -2896,10 +2890,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss);
+ GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_field(), 1);
@@ -2916,7 +2910,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
Handle<JSObject> holder,
Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -2926,10 +2920,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, callback,
+ GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
name, &miss);
__ bind(&miss);
@@ -2947,7 +2941,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<JSObject> holder,
Handle<JSFunction> value) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -2957,11 +2951,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
GenerateLoadConstant(
- receiver, holder, edx, ebx, eax, edi, value, name, &miss);
+ receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_constant_function(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2976,7 +2970,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
Handle<JSObject> holder,
Handle<String> name) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -2986,12 +2980,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
LookupResult lookup(isolate());
LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
+ GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
name, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_interceptor(), 1);
@@ -3005,7 +2999,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
Handle<String> name) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3015,10 +3009,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadArrayLength(masm(), edx, eax, &miss);
+ GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_array_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3031,7 +3025,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
Handle<String> name) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3041,10 +3035,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true);
+ GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_string_length(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3057,7 +3051,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
Handle<String> name) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3067,10 +3061,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
+ __ cmp(eax, Immediate(name));
__ j(not_equal, &miss);
- GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss);
+ GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_load_function_prototype(), 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3083,7 +3077,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
Handle<Map> receiver_map) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3104,7 +3098,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_ics) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3268,7 +3262,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3276,15 +3270,15 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
- __ mov(ebx, ecx);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+ __ mov(ebx, eax);
__ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
+ __ LoadFromNumberDictionary(&slow, ecx, eax, ebx, edx, edi, eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
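These stubs lean on ia32's one-bit smi tag: a tagged smi is the 31-bit payload shifted left by one, which is also why a still-tagged key can index 4-byte elements with a times_2 scale. Sketch:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // ia32: low bit 0 marks a smi

    inline int32_t SmiTag(int32_t value)    { return value << kSmiTagSize; }
    inline int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }

    // base + tagged_key * 2 == base + untagged_key * 4 (times_2 scaling).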
@@ -3293,6 +3293,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
__ pop(edx);
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
@@ -3304,6 +3304,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
__ bind(&miss_force_generic);
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
@@ -3315,44 +3317,11 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi and if SSE2 is available a heap number
- // containing a smi and branch if the check fails.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
-
-
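The helper removed above accepted not just smi keys but heap numbers that convert exactly to a smi, using an SSE2 round-trip (cvttsd2si, then cvtsi2sd, then compare). A well-defined C++ restatement of that predicate (a sketch, not the original code):

    #include <cstdint>

    // True iff d is an exact int32 that also fits the 31-bit smi range.
    inline bool DoubleKeyToSmi(double d, int32_t* out) {
      // Reject NaN and anything outside int32 up front; the assembly let
      // cvttsd2si fail and caught it with ucomisd plus the NaN parity check.
      if (!(d >= -2147483648.0 && d < 2147483648.0)) return false;
      int32_t i = static_cast<int32_t>(d);            // truncate, like cvttsd2si
      if (static_cast<double>(i) != d) return false;  // round trip must be exact
      if (i >= (1 << 30) || i < -(1 << 30)) return false;  // 31-bit smi range
      *out = i;
      return true;
    }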
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3361,41 +3330,41 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Check that the index is in range.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
+ __ cmp(eax, FieldOperand(ebx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &miss_force_generic);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
+ __ SmiUntag(eax); // Untag the index.
+ __ movsx_b(eax, Operand(ebx, eax, times_1, 0));
break;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
+ __ SmiUntag(eax); // Untag the index.
+ __ movzx_b(eax, Operand(ebx, eax, times_1, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
+ __ movsx_w(eax, Operand(ebx, eax, times_1, 0));
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
+ __ movzx_w(eax, Operand(ebx, eax, times_1, 0));
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
- __ mov(eax, Operand(ebx, ecx, times_2, 0));
+ __ mov(ecx, Operand(ebx, eax, times_2, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
- __ fld_s(Operand(ebx, ecx, times_2, 0));
+ __ fld_s(Operand(ebx, eax, times_2, 0));
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- __ fld_d(Operand(ebx, ecx, times_4, 0));
+ __ fld_d(Operand(ebx, eax, times_4, 0));
break;
default:
UNREACHABLE();
@@ -3403,7 +3372,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
// For integer array types:
- // eax: value
+ // ecx: value
// For floating-point array type:
// FP(0): value
@@ -3414,17 +3383,18 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// it to a HeapNumber.
Label box_int;
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ cmp(eax, 0xc0000000);
+ __ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xc0000000));
+ __ test(ecx, Immediate(0xC0000000));
__ j(not_zero, &box_int);
}
+ __ mov(eax, ecx);
__ SmiTag(eax);
__ ret(0);
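The two branches above restate the smi capacity check for 32-bit elements: a signed value fits a 31-bit smi iff it lies in [-2^30, 2^30), and an unsigned value fits iff its top two bits are clear. Equivalent C++:

    #include <cstdint>

    inline bool Int32FitsSmi(int32_t v) {   // cmp 0xC0000000 / j(sign) above
      return v >= -(1 << 30) && v < (1 << 30);
    }
    inline bool Uint32FitsSmi(uint32_t v) { // test 0xC0000000 / j(not_zero)
      return (v & 0xC0000000u) == 0;
    }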
@@ -3433,31 +3403,33 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ push(eax);
+ __ push(ecx);
__ fild_s(Operand(esp, 0));
- __ pop(eax);
+ __ pop(ecx);
} else {
ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
__ push(Immediate(0));
- __ push(eax);
+ __ push(ecx);
__ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
+ __ pop(ecx);
+ __ pop(ecx);
}
// FP(0): value
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
+ __ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
// Set the value.
+ __ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
} else {
@@ -3477,7 +3449,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3486,7 +3458,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ jmp(ic, RelocInfo::CODE_TARGET);
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3503,8 +3475,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3513,8 +3484,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Check that the index is in range.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3609,39 +3580,12 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
+ if (elements_kind != EXTERNAL_INT_ELEMENTS &&
+ elements_kind != EXTERNAL_UNSIGNED_INT_ELEMENTS) {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
// ebx: untagged integer value
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
__ ClampUint8(ebx);
@@ -3655,14 +3599,41 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ecx, times_1, 0), ebx);
break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
default:
UNREACHABLE();
break;
}
+ } else {
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatures::Scope scope(SSE3);
+ // fisttp stores values as signed integers. To represent the
+ // entire range of int and unsigned int arrays, store as a
+ // 64-bit int and discard the high 32 bits.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fisttp_d(Operand(esp, 0));
+ __ pop(ebx);
+ __ add(esp, Immediate(kPointerSize));
+ } else {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ // We can easily implement the correct rounding behavior for the
+ // range [0, 2^31-1]. For the time being, to keep this code simple,
+ // make the slow runtime call for values outside this range.
+ // Note: we could do better for signed int arrays.
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ // We will need the key if we have to make the slow runtime call.
+ __ push(ebx);
+ __ LoadPowerOf2(xmm1, ebx, 31);
+ __ pop(ebx);
+ __ ucomisd(xmm1, xmm0);
+ __ j(above_equal, &slow);
+ __ cvttsd2si(ebx, Operand(xmm0));
+ }
+ // ebx: untagged integer value
+ __ mov(Operand(edi, ecx, times_2, 0), ebx);
}
__ ret(0); // Return original value.
}
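Net effect of the restored int/uint store path, in C++ terms: truncate to 64 bits and keep the low 32, so NaN and the infinities (for which fisttp produces 0x8000000000000000) store as zero. A sketch with the special cases made explicit, since the raw cast is undefined behavior in C++:

    #include <cstdint>

    inline uint32_t DoubleToUint32Truncate(double d) {
      // Out-of-range and NaN inputs map to the hardware's "integer
      // indefinite" value, whose low 32 bits are zero.
      if (!(d >= -9223372036854775808.0 && d < 9223372036854775808.0)) return 0;
      return static_cast<uint32_t>(static_cast<int64_t>(d));
    }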
@@ -3700,7 +3671,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3709,19 +3680,19 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(ecx);
// Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(eax, ecx, times_2,
+ __ mov(ebx, Operand(ecx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
__ j(equal, &miss_force_generic);
@@ -3738,7 +3709,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- ecx : key
+ // -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3747,38 +3718,39 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(ecx);
// Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
+ __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
__ j(above_equal, &miss_force_generic);
// Check for the hole
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
+ __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
__ j(equal, &miss_force_generic);
// Always allocate a heap number for the result.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
+ __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
FixedDoubleArray::kHeaderSize));
} else {
- __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
}
- __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
+ __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
// Set the value.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else {
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
}
+ __ mov(eax, ecx);
__ ret(0);
__ bind(&slow_allocate_heapnumber);
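The hole check above compares only the upper half of the double: the hole is a dedicated NaN bit pattern whose upper 32 bits ordinary arithmetic never produces, so a single 32-bit compare at offset kHeaderSize + sizeof(kHoleNanLower32) suffices. Sketch (the constant is an illustrative stand-in, not the real kHoleNanUpper32):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed value

    inline bool IsTheHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // type-pun without UB
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }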
@@ -3815,8 +3787,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
@@ -3970,8 +3942,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Get the elements array.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -4032,7 +4004,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));