Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h           11
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc              91
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h               50
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc               93
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc           1699
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h              28
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                59
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc            95
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc                 16
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc           96
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                      4
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc       562
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h         69
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc   20
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc               266
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h                 98
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc       241
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h         56
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc            243
19 files changed, 1217 insertions(+), 2580 deletions(-)
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 5a35b207f7..05cc23a71d 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@ namespace internal {
static const byte kCallOpcode = 0xE8;
+static const int kNoCodeAgeSequenceLength = 5;
// The modes possibly affected by apply must be in kApplyMask.
@@ -190,6 +191,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
}
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Memory::Object_Handle_at(pc_ + 1);
+}
+
+
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
@@ -379,7 +387,8 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
+ } else if (!RelocInfo::IsNone(rmode)
+ && rmode != RelocInfo::CODE_AGE_SEQUENCE) {
RecordRelocInfo(rmode);
}
emit(x);
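
The new `kNoCodeAgeSequenceLength` constant and the `code_age_stub_handle()` accessor both follow from the shape of the ia32 code-age sequence: a single near call, i.e. the `0xE8` opcode followed by a 4-byte operand, which is why the handle is read back at `pc_ + 1`. A minimal standalone sketch (editor's illustration, not V8 code):

```cpp
// Editor's sketch (not V8 code): the ia32 code-age sequence is one near
// CALL, so its length is 1 opcode byte (0xE8) plus a 4-byte operand = 5,
// and code_age_stub_handle() reads that operand back at pc_ + 1.
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t kCallOpcode = 0xE8;
  const int kNoCodeAgeSequenceLength = 1 /* opcode */ + 4 /* imm32 */;
  printf("opcode=0x%02X length=%d\n", kCallOpcode, kNoCodeAgeSequenceLength);
  return 0;
}
```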
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index e5456da474..0557ed8853 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -53,6 +53,7 @@ bool CpuFeatures::initialized_ = false;
#endif
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
+uint64_t CpuFeatures::cross_compile_ = 0;
ExternalReference ExternalReference::cpu_features() {
@@ -1131,30 +1132,21 @@ void Assembler::sub(const Operand& dst, Register src) {
void Assembler::test(Register reg, const Immediate& imm) {
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
+ }
+
EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
} else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
}
+ emit(imm);
}
@@ -1178,6 +1170,9 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
test(op.reg(), imm);
return;
}
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1185,9 +1180,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
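
The refactored `test()`/`test_b()` pair routes any relocation-free immediate that fits in 8 bits through `test_b`, which in turn picks the shortest encoding the register allows. A rough standalone sketch of that opcode selection, assuming the usual ia32 register codes (eax=0 through ebx=3 have byte variants); this is an illustration, not the V8 emitter:

```cpp
// Illustration only (assumed ia32 register codes; not the V8 emitter):
// how test()/test_b() now choose an encoding for `test reg, imm`.
#include <cstdint>
#include <cstdio>
#include <vector>

// eax=0, ecx=1, edx=2, ebx=3 have byte variants (al, cl, dl, bl).
static bool is_byte_register(int code) { return code <= 3; }

static std::vector<uint8_t> encode_test(int reg, uint32_t imm) {
  std::vector<uint8_t> out;
  auto emit_imm32 = [&](uint32_t v) {
    for (int i = 0; i < 4; i++) out.push_back((v >> (8 * i)) & 0xFF);
  };
  if (imm <= 0xFF) {                 // is_uint8: take the test_b path
    if (reg == 0) {                  // TEST AL, imm8
      out = {0xA8, static_cast<uint8_t>(imm)};
    } else if (is_byte_register(reg)) {  // TEST r8, imm8 via 0xF6 /0
      out = {0xF6, static_cast<uint8_t>(0xC0 | reg),
             static_cast<uint8_t>(imm)};
    } else {                         // no byte variant: 0xF7 /0 + imm32
      out = {0xF7, static_cast<uint8_t>(0xC0 | reg)};
      emit_imm32(imm);               // imm8 zero-extended, as emit(imm8) does
    }
  } else if (reg == 0) {             // TEST EAX, imm32 short form
    out.push_back(0xA9);
    emit_imm32(imm);
  } else {                           // TEST r32, imm32 via 0xF7 /0
    out = {0xF7, static_cast<uint8_t>(0xC0 | reg)};
    emit_imm32(imm);
  }
  return out;
}

int main() {
  for (uint8_t b : encode_test(1 /* ecx */, 0x10)) printf("%02X ", b);
  printf("\n");  // F6 C1 10 == test cl, 0x10
  return 0;
}
```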
@@ -1402,7 +1414,8 @@ void Assembler::call(Handle<Code> code,
TypeFeedbackId ast_id) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ ASSERT(RelocInfo::IsCodeTarget(rmode)
+ || rmode == RelocInfo::CODE_AGE_SEQUENCE);
EMIT(0xE8);
emit(code, rmode, ast_id);
}
@@ -2055,6 +2068,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) {
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2064,6 +2078,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2073,6 +2088,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2235,18 +2251,6 @@ void Assembler::prefetch(const Operand& src, int level) {
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2335,11 +2339,19 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x17);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
EMIT(imm8);
}
+void Assembler::andps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2474,6 +2486,11 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
+ EMIT(0xC0 | (dst.code() << 3) | src.code());
+}
+
+
void Assembler::Print() {
Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
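
Both the `extractps` fix and the new `emit_sse_operand(XMMRegister, Register)` overload hinge on ModRM field order: for `extractps r/m32, xmm, imm8` the XMM source occupies the reg field and the GP destination the r/m field, so the helper must be called as `(src, dst)`. A small sketch of the byte composition (assumed register codes, not V8 code):

```cpp
// Editor's sketch (assumed register codes, not V8 code): the ModRM byte
// for register-direct SSE forms. For extractps r/m32, xmm, imm8 the XMM
// source occupies the reg field and the GP destination the r/m field,
// which is why the call site must be emit_sse_operand(src, dst).
#include <cstdint>
#include <cstdio>

static uint8_t modrm_reg_direct(int reg_field, int rm_field) {
  return static_cast<uint8_t>(0xC0 | ((reg_field & 7) << 3) | (rm_field & 7));
}

int main() {
  // extractps edx, xmm1, 0 -> 66 0F 3A 17 /r ib with reg=xmm1(1), r/m=edx(2)
  printf("modrm=0x%02X\n", modrm_reg_direct(/*xmm*/ 1, /*gpr*/ 2));  // 0xCA
  return 0;
}
```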
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 55eff93190..f46c6478db 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -535,32 +535,54 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
+ if (Check(f, cross_compile_)) return true;
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, supported_);
}
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
- return (found_by_runtime_probing_only_ &
- (static_cast<uint64_t>(1) << f)) != 0;
+ return Check(f, found_by_runtime_probing_only_);
}
static bool IsSafeForSnapshot(CpuFeature f) {
- return (IsSupported(f) &&
+ return Check(f, cross_compile_) ||
+ (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
+ static bool VerifyCrossCompiling() {
+ return cross_compile_ == 0;
+ }
+
+ static bool VerifyCrossCompiling(CpuFeature f) {
+ uint64_t mask = flag2set(f);
+ return cross_compile_ == 0 ||
+ (cross_compile_ & mask) == mask;
+ }
+
private:
+ static bool Check(CpuFeature f, uint64_t set) {
+ return (set & flag2set(f)) != 0;
+ }
+
+ static uint64_t flag2set(CpuFeature f) {
+ return static_cast<uint64_t>(1) << f;
+ }
+
#ifdef DEBUG
static bool initialized_;
#endif
static uint64_t supported_;
static uint64_t found_by_runtime_probing_only_;
+ static uint64_t cross_compile_;
+
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
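
The `cross_compile_` additions turn `CpuFeatures` into three 64-bit sets with one bit per feature; `Check()` and `flag2set()` centralize the mask arithmetic, and a non-zero `cross_compile_` short-circuits `IsSupported()` so cross-compiled snapshots can assume target features the host probe never saw. A compilable miniature of the pattern (names simplified, an illustration only):

```cpp
// Compilable miniature of the pattern above (names simplified, an
// illustration only): one bit per feature across uint64_t sets, with
// cross_compile_ taking precedence over the host probe.
#include <cstdint>

enum CpuFeature { SSE2 = 0, SSE3 = 1, SSE4_1 = 2, CMOV = 3 };

struct Features {
  uint64_t supported = 0;       // found on the host CPU
  uint64_t cross_compile = 0;   // forced on for a cross-compile target

  static uint64_t flag2set(CpuFeature f) { return uint64_t{1} << f; }
  static bool Check(CpuFeature f, uint64_t set) {
    return (set & flag2set(f)) != 0;
  }
  bool IsSupported(CpuFeature f) const {
    // A cross-compile target's features are assumed present regardless
    // of what the host probe found.
    return Check(f, cross_compile) || Check(f, supported);
  }
};

int main() {
  Features f;
  f.cross_compile = Features::flag2set(SSE2);
  return f.IsSupported(SSE2) ? 0 : 1;  // 0: SSE2 reported supported
}
```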
@@ -852,7 +874,7 @@ class Assembler : public AssemblerBase {
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
@@ -995,6 +1017,10 @@ class Assembler : public AssemblerBase {
void cpuid();
+ // SSE instructions
+ void andps(XMMRegister dst, XMMRegister src);
+ void xorps(XMMRegister dst, XMMRegister src);
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
@@ -1012,7 +1038,6 @@ class Assembler : public AssemblerBase {
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1050,15 +1075,14 @@ class Assembler : public AssemblerBase {
}
}
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1136,16 +1160,14 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- byte byte_at(int pos) { return buffer_[pos]; }
+ byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst, Register src);
byte* addr_at(int pos) { return buffer_ + pos; }
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index a1597481aa..e5e6ec50d1 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
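
`PrepareCallCFunction(2, ebx)` now reserves two cdecl argument slots: on ia32 the first C argument sits at `[esp]` and the second at `[esp + kPointerSize]` at the call, which is why the sequence address goes to `Operand(esp, 0)` and the isolate to `Operand(esp, 1 * kPointerSize)`. A hedged C++ stand-in for the called helper (the two-argument signature is inferred from these stores, not a verbatim V8 declaration):

```cpp
// Hedged stand-in for the C helper this builtin calls; the signature is
// inferred from the two stores above, not a verbatim V8 declaration.
// On ia32 cdecl, the first argument lives at [esp] and the second at
// [esp + kPointerSize] at the call, matching Operand(esp, 0) and
// Operand(esp, 1 * kPointerSize).
#include <cstdint>
#include <cstdio>

static void make_code_young(uint8_t* sequence, void* isolate) {
  printf("rejuvenating code at %p (isolate %p)\n",
         static_cast<void*>(sequence), isolate);
}

int main() {
  uint8_t code_sequence[16] = {0};
  void* isolate = nullptr;  // placeholder Isolate*
  make_code_young(code_sequence, isolate);
  return 0;
}
```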
@@ -561,6 +563,44 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+ // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+ // that make_code_young doesn't do any garbage collection which allows us to
+ // save/restore the registers without worrying about which of them contain
+ // pointers.
+ __ pushad();
+ __ mov(eax, Operand(esp, 8 * kPointerSize));
+ __ sub(eax, Immediate(Assembler::kCallInstructionLength));
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
+ }
+ __ popad();
+
+ // Perform prologue operations usually performed by the young code stub.
+ __ pop(eax); // Pop return address into scratch register.
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS Function.
+ __ push(eax); // Push return address after frame prologue.
+
+ // Jump to point after the code-age stub.
+ __ ret(0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+ GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
// Enter an internal frame.
{
@@ -628,25 +668,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
- // TODO(mvstanton): We should save these regs, do this in a future
- // checkin.
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
@@ -1063,13 +1084,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@@ -1326,6 +1345,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+ // We check the stack limit as indicator that recompilation might be done.
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ }
+ __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&ok);
+ __ ret(0);
+}
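
In C-like terms the new builtin reads as follows: pass the stack check and return to the caller unchanged, or run the stack guard (where recompilation work may be queued) and tail-jump into `OnStackReplacement`. A sketch with stand-in definitions (assumptions, not V8 code):

```cpp
// Editor's sketch in C++ of the control flow emitted above (stand-in
// definitions; assumptions, not V8 code).
#include <cstdint>
#include <cstdio>

static uintptr_t stack_limit = 0x1000;  // address_of_stack_limit stand-in
static void StackGuard() { puts("Runtime::kStackGuard"); }
static void OnStackReplacement() { puts("OnStackReplacement builtin"); }

static void OsrAfterStackCheck(uintptr_t esp) {
  if (esp >= stack_limit) return;  // &ok: no interrupt pending, plain ret
  StackGuard();                    // recompilation work may be queued here
  OnStackReplacement();            // jmp (not call) in the real builtin
}

int main() {
  OsrAfterStackCheck(0x2000);  // above the limit: returns immediately
  OsrAfterStackCheck(0x0800);  // below the limit: guard, then OSR entry
  return 0;
}
```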
+
#undef __
}
} // namespace v8::internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a83c1ae91d..b6bbe04b33 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -82,7 +93,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -162,7 +173,7 @@ static void InitializeArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->stack_parameter_count_ = eax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -184,7 +195,7 @@ static void InitializeInternalArrayConstructorDescriptor(
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &eax;
+ descriptor->stack_parameter_count_ = eax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
@@ -283,6 +294,18 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void BinaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, eax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -432,7 +455,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
@@ -448,7 +471,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
@@ -470,18 +493,6 @@ class FloatingPointHelper : public AllStatic {
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
@@ -489,32 +500,11 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -658,1259 +648,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- // Smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
- __ j(not_equal, &right_arg_changed);
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats, not_int32, right_arg_changed;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ebx, ecx, xmm2);
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm1, edi, ecx, xmm2);
- if (op_ == Token::MOD) {
- if (encoded_right_arg_.has_value) {
- __ cmp(edi, Immediate(fixed_right_arg_value()));
- __ j(not_equal, &right_arg_changed);
- }
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- BinaryOpStub_GenerateRegisterArgsPop(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ push(eax);
- GenerateCallRuntime(masm);
- }
- __ ret(0);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
-
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
@@ -2034,7 +771,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2049,7 +786,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
@@ -2062,17 +799,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
@@ -2098,13 +835,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
@@ -2221,79 +958,6 @@ void TranscendentalCacheStub::GenerateOperation(
}
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- __ TruncateHeapNumberToI(edx, edx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- __ TruncateHeapNumberToI(ecx, eax);
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
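The undefined-to-zero branches in the removed helper above implement ToInt32 from ECMA-262 section 9.5. A minimal C++ sketch of that conversion, assuming the value has already been unboxed to a double (the stub instead dispatches on smi, heap number, and undefined):

    #include <cmath>
    #include <cstdint>

    // Hedged sketch of ECMA-262 section 9.5 ToInt32: undefined, NaN and
    // +/-Infinity all convert to 0; finite values truncate toward zero and
    // wrap modulo 2^32.
    int32_t ToInt32Sketch(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // 2^32
      return static_cast<int32_t>(
          static_cast<uint32_t>(static_cast<int64_t>(wrapped)));
    }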
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -2320,7 +984,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -2329,109 +993,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
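The cvtsi2sd call sites in this hunk move to a capitalized Cvtsi2sd macro. The macro itself is defined outside this diff, so the following is an assumption about its shape: cvtsi2sd only writes the low lanes of its destination, and zeroing the register first breaks a false dependence on its previous contents.

    // Hypothetical wrapper (the real macro lives in the ia32 macro assembler):
    void Cvtsi2sdSketch(MacroAssembler* masm, XMMRegister dst, Register src) {
      masm->xorps(dst, dst);     // clear dst to break the false dependence
      masm->cvtsi2sd(dst, src);  // then perform the conversion proper
    }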
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
-
-
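The removed CheckSSE2OperandIsInt32 recognizes doubles that hold an exact int32 by converting to int32 and back, then comparing bit patterns, which correctly rejects -0.0 and fractional values. A scalar C++ sketch of the same round trip (assuming an in-range, non-NaN input; the hardware cvttsd2si would instead produce the 0x80000000 sentinel, which also fails the compare):

    #include <cstdint>
    #include <cstring>

    bool IsInt32DoubleSketch(double operand) {
      int32_t as_int = static_cast<int32_t>(operand);   // cvttsd2si: truncate
      double round_trip = static_cast<double>(as_int);  // cvtsi2sd: widen back
      uint64_t a, b;
      std::memcpy(&a, &operand, sizeof(a));
      std::memcpy(&b, &round_trip, sizeof(b));
      return a == b;  // the stub compares these bits with pcmpeqd/movmskps
    }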
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
@@ -2470,7 +1045,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -2485,12 +1060,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2501,7 +1076,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2509,7 +1084,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
@@ -2604,9 +1179,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
@@ -2629,7 +1204,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ test_b(eax, 0x5F); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
@@ -2683,7 +1258,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2696,7 +1271,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
@@ -2704,8 +1279,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
@@ -2713,7 +1288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
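The integer-exponent fast path of MathPowStub, elided between the hunks above, is a square-and-multiply loop seeded by the "Save 1 in double_result" store. A hedged C++ restatement of the scheme, not the stub's exact code:

    double PowIntSketch(double base, int exponent) {
      bool negative = exponent < 0;
      unsigned n = negative ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
      double result = 1.0;  // the "Save 1 in double_result" seed
      for (double scratch = base; n != 0; n >>= 1) {
        if (n & 1) result *= scratch;  // fold in the current exponent bit
        scratch *= scratch;            // square for the next bit
      }
      return negative ? 1.0 / result : result;
    }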
@@ -2756,8 +1331,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
}
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3495,7 +2069,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3768,106 +2342,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
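The hash scheme the removed lookup implements (mirroring Heap::GetNumberStringCache, per its comment) is compact enough to restate: smis hash to their untagged value, heap numbers to the xor of the double's two 32-bit words, and the mask is half the cache length minus one because each entry is a (number, string) pair.

    #include <cstdint>
    #include <cstring>

    // Hedged sketch of the heap-number case of the cache index computation.
    uint32_t DoubleCacheIndexSketch(double number, int cache_length) {
      uint32_t mask = cache_length / 2 - 1;        // untag, halve, subtract 1
      uint32_t words[2];
      std::memcpy(words, &number, sizeof(words));  // [0] low, [1] high on LE
      return (words[0] ^ words[1]) & mask;
    }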
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -4205,6 +2679,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
@@ -4224,9 +2699,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ j(not_equal, &miss);
@@ -4265,6 +2739,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
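The SmiTag before the runtime call matters because every slot the GC can see across the call must hold a tagged value. On ia32 a smi is the 31-bit integer shifted left one position over a zero tag bit (kSmiTag == 0, which the STATIC_ASSERT earlier in this file relies on). A sketch:

    // Hedged sketch of ia32 smi tagging (kSmiTagSize == 1, kSmiTag == 0).
    inline int32_t SmiTagSketch(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }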
@@ -4444,6 +2919,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ if (Serializer::enabled()) {
+ PlatformFeatureScope sse2(SSE2);
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ } else {
+ BinaryOpStub::GenerateAheadOfTime(isolate);
+ }
}
@@ -4508,6 +2989,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -5455,33 +3938,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ ret(0);
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm, ecx);
- // Build a frame
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ ret(0);
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5517,12 +3978,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
__ bind(&done);
@@ -6253,24 +4709,24 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
+ __ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ Cvtsi2sd(xmm0, ecx);
__ bind(&done);
// Compare operands.
@@ -7300,9 +5756,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ inc(edx);
__ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ Assert(equal, kExpectedAllocationSiteInCell);
}
@@ -7447,8 +5902,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
- masm->isolate()->heap()->allocation_site_map())));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 5c8eca37b5..006651c9c8 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -217,30 +217,6 @@ class StringCompareStub: public PlatformCodeStub {
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -468,7 +444,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
@@ -480,7 +456,7 @@ class RecordWriteStub: public PlatformCodeStub {
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 84a4d238bd..d09a85f8b1 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -117,7 +117,7 @@ UnaryMathFunction CreateExpFunction() {
CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
@@ -125,7 +125,7 @@ UnaryMathFunction CreateExpFunction() {
__ pop(ebx);
__ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
@@ -155,9 +155,9 @@ UnaryMathFunction CreateSqrtFunction() {
// Move double input into registers.
{
CpuFeatureScope use_sse2(&masm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
@@ -462,10 +462,10 @@ OS::MemMoveFunction CreateMemMoveFunction() {
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
- __ movdbl(xmm0, Operand(src, 0));
- __ movdbl(xmm1, Operand(src, count, times_1, -8));
- __ movdbl(Operand(dst, 0), xmm0);
- __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
@@ -666,8 +666,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, allocation_memento_found);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
}
// Set transitioned map.
@@ -694,8 +693,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -743,7 +741,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(the_hole_nan,
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
@@ -768,8 +766,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
@@ -789,7 +787,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -833,8 +831,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationMemento(edx, edi);
- __ j(equal, fail);
+ __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
}
// Check for empty arrays, which only require a map transition and no changes
@@ -899,9 +896,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0,
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1081,20 +1078,20 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label done;
- __ movdbl(double_scratch, ExpConstant(0));
+ __ movsd(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
+ __ movsd(result, ExpConstant(2));
__ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
+ __ movsd(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
@@ -1111,7 +1108,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
+ __ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
__ por(input, double_scratch);
__ mulsd(result, input);
@@ -1120,7 +1117,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-static const int kNoCodeAgeSequenceLength = 5;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
@@ -1153,7 +1149,7 @@ bool Code::IsYoungSequence(byte* sequence) {
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
- *age = kNoAge;
+ *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
@@ -1165,16 +1161,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
+ if (age == kNoAgeCodeAge) {
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 13a70afe52..e339b3ad11 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// sub <profiling_counter>, <delta>
-// jns ok
-// call <interrupt stub>
-// ok:
-//
-// The patched back edge looks like this:
-//
-// sub <profiling_counter>, <delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacment>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
@@ -283,16 +202,14 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
intptr_t handler =
reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
+ int params = descriptor->environment_length();
output_frame->SetRegister(eax.code(), params);
output_frame->SetRegister(ebx.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+ if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
@@ -330,7 +247,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ __ movsd(Operand(esp, offset), xmm_reg);
}
}
@@ -382,8 +299,8 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
}
@@ -468,7 +385,7 @@ void Deoptimizer::EntryGenerator::Generate() {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 01fa999645..13cf6bc49a 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -942,13 +942,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case SHORT_IMMEDIATE_INSTR: {
byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
+ AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
data += 5;
break;
}
case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+ AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
@@ -1042,6 +1042,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x54) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("andps %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (f0byte == 0x57) {
data += 2;
int mod, regop, rm;
@@ -1239,8 +1247,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
+ NameOfCPURegister(rm),
+ NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 6d39cc1e6e..704fb4e7d2 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -158,10 +158,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -1586,21 +1583,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -3316,7 +3307,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
} else {
// 0x4130000000000000 is 1.0 x 2^20 as a double.
__ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
@@ -3555,8 +3546,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -4897,6 +4888,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ Address jns_offset_address = call_target_address - 2;
+
+ switch (target_state) {
+ case INTERRUPT:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // jns ok
+ // call <interrupt stub>
+ // ok:
+ *jns_instr_address = kJnsInstruction;
+ *jns_offset_address = kJnsOffset;
+ break;
+ case ON_STACK_REPLACEMENT:
+ case OSR_AFTER_STACK_CHECK:
+ // sub <profiling_counter>, <delta> ;; Not changed
+ // nop
+ // nop
+      // call <on-stack replacement>
+ // ok:
+ *jns_instr_address = kNopByteOne;
+ *jns_offset_address = kNopByteTwo;
+ break;
+ }
+
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ Address jns_instr_address = call_target_address - 3;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+
+ if (*jns_instr_address == kJnsInstruction) {
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return INTERRUPT;
+ }
+
+ ASSERT_EQ(kNopByteOne, *jns_instr_address);
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+
+ if (Assembler::target_address_at(call_target_address) ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
+ return ON_STACK_REPLACEMENT;
+ }
+
+ ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+ Assembler::target_address_at(call_target_address));
+ return OSR_AFTER_STACK_CHECK;
+}
+
+
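PatchAt and GetBackEdgeState both decode the same five bytes at the end of the back edge sequence. A sketch of the discriminating test, using the byte constants defined above PatchAt (pc points just past the call's 4-byte operand, hence pc - kIntSize):

    #include <cstdint>

    // Hedged sketch: the byte three before the call operand is either the jns
    // opcode (0x79, INTERRUPT state) or the first byte of the two-byte nop
    // (0x66 0x90) that the OSR states write.
    bool IsPatchedForOsrSketch(const uint8_t* pc) {
      const uint8_t* call_target_address = pc - 4;  // pc - kIntSize
      return *(call_target_address - 3) == 0x66;    // kNopByteOne
    }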
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 327ac57623..f8e4ea53d0 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1304,7 +1304,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, Code::kNoExtraICState,
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, eax);
@@ -1423,7 +1423,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
Code::Flags flags = Code::ComputeFlags(
- Code::STUB, MONOMORPHIC, strict_mode,
+ Code::HANDLER, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, edx, ecx, ebx, no_reg);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d50b780d71..46c87e1d62 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -120,24 +120,6 @@ void LCodeGen::Abort(BailoutReason reason) {
}
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
@@ -206,15 +188,8 @@ bool LCodeGen::GeneratePrologue() {
if (NeedsEagerFrame()) {
ASSERT(!frame_is_built_);
frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
+ __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
info()->AddNoFrameRange(0, masm_->pc_offset());
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
}
if (info()->IsOptimizing() &&
@@ -275,7 +250,7 @@ bool LCodeGen::GeneratePrologue() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
+ __ movsd(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
@@ -340,12 +315,41 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
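The count loaded into ecx above deserves a gloss: the "+ 5" is exactly the non-parameter words the comment enumerates. A hedged restatement:

    // Hedged sketch of the align_loop word count.
    int AlignLoopWordCountSketch(int num_parameters, int unoptimized_slots) {
      const int kAlignmentState = 1, kContext = 1, kSavedFramePointer = 1,
                kReturnAddress = 1, kReceiver = 1;  // these sum to the "+ 5"
      return num_parameters + unoptimized_slots + kAlignmentState + kContext +
             kSavedFramePointer + kReturnAddress + kReceiver;
    }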
// Save the first local, which is overwritten by the alignment state.
Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
__ push(alignment_loc);
- // Set the dynamic frame alignment state to "not aligned".
- __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
@@ -355,44 +359,27 @@ void LCodeGen::GenerateOsrPrologue() {
}
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
-
- // Don't emit code for basic blocks with a replacement.
- if (instr->IsLabel()) {
- emit_instructions = !LLabel::cast(instr)->HasReplacement();
- }
- if (!emit_instructions) continue;
-
- if (FLAG_code_comments && instr->HasInterestingComment(this)) {
- Comment(";;; <@%d,#%d> %s",
- current_instruction_,
- instr->hydrogen_value()->id(),
- instr->Mnemonic());
- }
-
- if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-
- RecordAndUpdatePosition(instr->position());
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
- instr->CompileToNative(this);
- if (!CpuFeatures::IsSupported(SSE2)) {
- if (instr->IsGoto()) {
- x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
- } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
- !instr->IsGap() && !instr->IsReturn()) {
- __ VerifyX87StackDepth(x87_stack_.depth());
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ if (instr->IsGoto()) {
+ x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+ } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+ !instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
}
+ __ VerifyX87StackDepth(x87_stack_.depth());
}
}
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
}
@@ -453,8 +440,9 @@ bool LCodeGen::GenerateDeferredCode() {
X87Stack copy(code->x87_stack());
x87_stack_ = copy;
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
+ HValue* value =
+ instructions_->at(code->instruction_index())->hydrogen_value();
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -532,6 +520,16 @@ void LCodeGen::X87LoadForUsage(X87Register reg) {
}
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
ASSERT(is_mutable_);
ASSERT(Contains(reg) && stack_depth_ > other_slot);
@@ -931,8 +929,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
@@ -954,13 +950,12 @@ void LCodeGen::CallCode(Handle<Code> code,
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr) {
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ CallRuntime(fun, argc);
+ __ CallRuntime(fun, argc, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -1122,26 +1117,31 @@ void LCodeGen::DeoptimizeIf(Condition cc,
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
+ ZoneList<Handle<JSObject> > objects(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
+ if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+ if (it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
maps.Add(map, zone());
+ } else if (it.rinfo()->target_object()->IsJSObject()) {
+ Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+ objects.Add(object, zone());
}
}
}
#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
+ // This disables verification of weak embedded objects after full GC.
// AddDependentCode can cause a GC, which would observe the state where
// this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+ NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
for (int i = 0; i < maps.length(); i++) {
maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
+ for (int i = 0; i < objects.length(); i++) {
+ AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+ }
}
@@ -1246,7 +1246,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+ LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, mode);
}
@@ -1258,17 +1258,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
+ masm()->positions_recorder()->WriteRecordedPositions();
}
@@ -1336,11 +1329,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1733,9 +1721,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
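DoMulI strength-reduces small constant multipliers, two cases of which are visible above: x*9 becomes a single lea and x*16 a shift, with imul as the fallback. In C++ terms (casts through uint32_t mirror the wrap-around the machine instructions produce, since signed overflow would be undefined behavior):

    int32_t MulByConstantSketch(int32_t x, int32_t constant) {
      uint32_t ux = static_cast<uint32_t>(x);
      switch (constant) {
        case 9:  return static_cast<int32_t>(ux + ux * 8);  // lea [x + x*8]
        case 16: return static_cast<int32_t>(ux << 4);      // shl x, 4
        default: return static_cast<int32_t>(
                     ux * static_cast<uint32_t>(constant)); // imul
      }
    }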
@@ -1967,9 +1955,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ por(res, xmm_scratch);
}
}
}
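DoConstantD assembles a 64-bit double immediate from two 32-bit general-purpose moves: movd plus psllq places the upper word, and a nonzero lower word is or'ed in through the scratch XMM register. The equivalent bit manipulation in C++:

    #include <cstdint>
    #include <cstring>

    double MaterializeDoubleSketch(uint32_t upper, uint32_t lower) {
      uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
      double result;
      std::memcpy(&result, &bits, sizeof(result));  // reinterpret, not convert
      return result;
    }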
@@ -2178,7 +2167,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -2208,8 +2197,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
@@ -2229,17 +2216,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
+ // Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
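The fstp_d/movsd pair above is the standard ia32 bridge from the x87 stack to an SSE register; no direct register-to-register move exists, so the value round-trips through a stack slot. As a reusable macro-assembler-style helper (hypothetical, not a V8 function):

    void StoreX87ResultToXmmSketch(MacroAssembler* masm, XMMRegister result) {
      masm->sub(Operand(esp), Immediate(kDoubleSize));  // open a scratch slot
      masm->fstp_d(Operand(esp, 0));                    // pop st(0) into it
      masm->movsd(result, Operand(esp, 0));             // reload as SSE double
      masm->add(Operand(esp), Immediate(kDoubleSize));  // release the slot
    }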
@@ -2272,6 +2259,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ PrepareCallCFunction(4, eax);
X87Mov(Operand(esp, 1 * kDoubleSize), right);
X87Mov(Operand(esp, 0), left);
+ X87Free(right);
+ ASSERT(left.is(result));
X87PrepareToWrite(result);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
@@ -2301,14 +2290,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
}
-int LCodeGen::GetNextEmittedBlock() const {
- for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
- if (!chunk_->GetLabel(i)->HasReplacement()) return i;
- }
- return -1;
-}
-
-
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
@@ -2340,25 +2321,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2369,8 +2331,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2390,8 +2353,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2476,8 +2440,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2521,6 +2486,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
case Token::EQ_STRICT:
cond = equal;
break;
+ case Token::NE:
+ case Token::NE_STRICT:
+ cond = not_equal;
+ break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
@@ -2556,10 +2525,15 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), SSE2);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
@@ -2626,7 +2600,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
+ __ movsd(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
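DoCmpHoleAndBranch spills the value and compares raw bits because the hole is encoded as one specific NaN pattern, which ucomisd can never match: NaN compares unordered even against itself. A sketch of the check being made:

    #include <cstdint>
    #include <cstring>

    // Detect the hole by bit pattern; kHoleBits is a placeholder, the real
    // encoding lives elsewhere in the V8 sources.
    bool IsHoleNan(double value, uint64_t kHoleBits) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return bits == kHoleBits;
    }

    int main() {
      const uint64_t kDemoHole = 0x7FF7FFFFFFFFFFFFull;  // arbitrary NaN pattern
      return IsHoleNan(1.0, kDemoHole) ? 1 : 0;          // 0: 1.0 is not a NaN
    }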
@@ -3016,14 +2990,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
@@ -3096,7 +3062,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -3131,7 +3097,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -3154,7 +3120,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3245,12 +3211,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
- if (instr->object()->IsConstantOperand()) {
- ExternalReference external_reference = ToExternalReference(
- LConstantOperand::cast(instr->object()));
- __ mov(result, MemOperand::StaticVariable(external_reference));
+ MemOperand operand = instr->object()->IsConstantOperand()
+ ? MemOperand::StaticVariable(ToExternalReference(
+ LConstantOperand::cast(instr->object())))
+ : MemOperand(ToRegister(instr->object()), offset);
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, operand);
} else {
- __ mov(result, MemOperand(ToRegister(instr->object()), offset));
+ __ mov(result, operand);
}
return;
}
@@ -3261,7 +3230,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, FieldOperand(object, offset));
+ __ movsd(result, FieldOperand(object, offset));
} else {
X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
@@ -3269,11 +3238,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
Register result = ToRegister(instr->result());
- if (access.IsInobject()) {
- __ mov(result, FieldOperand(object, offset));
- } else {
+ if (!access.IsInobject()) {
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset));
+ object = result;
+ }
+ if (access.representation().IsByte()) {
+ ASSERT(instr->hydrogen()->representation().IsInteger32());
+ __ movzx_b(result, FieldOperand(object, offset));
+ } else {
+ __ mov(result, FieldOperand(object, offset));
}
}
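The new byte-representation path loads fields with movzx_b, so a one-byte field arrives in the result register already zero-extended to a full integer. The equivalent in C++:

    #include <cstdint>

    // What movzx_b(result, operand) yields: a one-byte load zero-extended to
    // a full 32-bit integer (0..255, never sign-extended).
    int32_t LoadByteField(const uint8_t* object, int offset) {
      return static_cast<int32_t>(object[offset]);
    }

    int main() {
      const uint8_t fields[2] = {7, 0xFF};
      return LoadByteField(fields, 1) == 255 ? 0 : 1;
    }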
@@ -3349,6 +3322,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3405,7 +3384,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
X87Mov(ToX87Register(instr->result()), operand);
}
@@ -3476,7 +3455,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
+ __ movsd(result, double_load_operand);
} else {
X87Mov(ToX87Register(instr->result()), double_load_operand);
}
@@ -3693,7 +3672,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&invoke);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
@@ -3778,9 +3756,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
if (can_invoke_directly) {
if (edi_state == EDI_UNINITIALIZED) {
__ LoadHeapObject(edi, function);
@@ -3805,6 +3780,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
} else {
// We need to adapt arguments.
+ LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
@@ -3903,11 +3879,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
+ __ andps(input_reg, scratch);
} else if (r.IsSmiOrInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
@@ -3924,7 +3900,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3977,7 +3953,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
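Several hunks swap cvtsi2sd for the capitalized Cvtsi2sd macro. The raw instruction writes only the low lane of its destination, leaving a false dependency on whatever the register held before; the macro breaks that chain by zeroing the register first. A sketch with a hypothetical assembler interface (the macro body is assumed from the macro-assembler convention, not quoted from this patch):

    // Just enough interface to show the macro's shape.
    struct Assembler {
      void xorps(int dst, int src) { /* emit xorps */ }
      void cvtsi2sd(int dst, int src) { /* emit cvtsi2sd */ }
    };

    // Assumed body of Cvtsi2sd: clear the destination so the int->double
    // conversion does not depend on the register's stale upper bits.
    void Cvtsi2sd(Assembler& masm, int dst, int src) {
      masm.xorps(dst, dst);
      masm.cvtsi2sd(dst, src);
    }

    int main() {
      Assembler masm;
      Cvtsi2sd(masm, /*dst=*/0, /*src=*/1);
      return 0;
    }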
@@ -3992,14 +3968,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half);
@@ -4013,7 +3989,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ jmp(&done);
__ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero);
@@ -4027,7 +4003,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ j(equal, &done);
__ sub(output_reg, Immediate(1));
@@ -4059,7 +4035,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4178,8 +4154,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(scratch4, scratch3);
__ movd(result, random);
@@ -4193,29 +4168,29 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
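DoMathLog handles the edge cases inline: negative inputs get the canonical NaN, and zero now loads a shared negative-infinity constant instead of assembling one on the stack. The positive path relies on the x87 identity behind fldln2/fyl2x: fyl2x computes st(1) * log2(st(0)), so with ln(2) loaded first the result is ln(x). In C++:

    #include <cmath>
    #include <cstdio>

    // The identity the fldln2/fyl2x pair evaluates: ln(x) = ln(2) * log2(x).
    double LnViaLog2(double x) {
      return std::log(2.0) * std::log2(x);
    }

    int main() {
      std::printf("%g %g\n", LnViaLog2(10.0), std::log(10.0));  // both ~2.30259
      return 0;
    }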
@@ -4225,10 +4200,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -4273,7 +4249,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
@@ -4409,7 +4384,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
+ CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
@@ -4441,11 +4416,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
ToExternalReference(LConstantOperand::cast(instr->object())))
: MemOperand(ToRegister(instr->object()), offset);
if (instr->value()->IsConstantOperand()) {
+ ASSERT(!representation.IsByte());
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
__ mov(operand, Immediate(ToInteger32(operand_value)));
} else {
Register value = ToRegister(instr->value());
- __ mov(operand, value);
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
}
return;
}
@@ -4480,7 +4460,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(object, offset), value);
+ __ movsd(FieldOperand(object, offset), value);
} else {
X87Register value = ToX87Register(instr->value());
X87Mov(FieldOperand(object, offset), value);
@@ -4518,17 +4498,28 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
+ MemOperand operand = FieldOperand(write_register, offset);
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+ Register value = ToRegister(operand_value);
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- __ mov(FieldOperand(write_register, offset), handle_value);
+ __ mov(operand, handle_value);
}
} else {
- __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ Register value = ToRegister(instr->value());
+ if (representation.IsByte()) {
+ __ mov_b(operand, value);
+ } else {
+ __ mov(operand, value);
+ }
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4609,8 +4600,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
@@ -4618,7 +4610,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
X87Mov(operand, ToX87Register(instr->value()));
}
@@ -4676,11 +4668,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ ucomisd(value, value);
__ j(parity_odd, &have_value); // NaN.
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
- __ movdbl(double_store_operand, value);
+ __ movsd(double_store_operand, value);
} else {
// Can't use SSE2 in the serializer
if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -4803,8 +4795,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationMemento(object, temp);
+ Label no_memento_found;
+ __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr->environment());
+ __ bind(&no_memento_found);
}
@@ -4825,9 +4819,8 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
@@ -4978,7 +4971,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else if (input->IsRegister()) {
Register input_reg = ToRegister(input);
__ push(input_reg);
@@ -5001,14 +4994,32 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->temp();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ LOperand* temp = instr->temp();
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
+ __ LoadUint32(ToDoubleRegister(output),
+ ToRegister(input),
+ ToDoubleRegister(temp));
+ } else {
+ X87Register res = ToX87Register(output);
+ X87PrepareToWrite(res);
+ __ LoadUint32NoSSE2(ToRegister(input));
+ X87CommitWrite(res);
+ }
+}
+
+
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+ Register input = ToRegister(instr->value());
+ if (!instr->hydrogen()->value()->HasRange() ||
+ !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ test(input, Immediate(0xc0000000));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ SmiTag(input);
}
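The new DoUint32ToSmi makes the range check explicit: a 32-bit smi carries a 31-bit signed payload, so an unsigned value is taggable only when it is below 2^30, i.e. when its top two bits are clear — exactly the 0xc0000000 mask tested before SmiTag. A sketch:

    #include <cassert>
    #include <cstdint>

    // A 32-bit smi is value << 1 with a 31-bit signed payload, so an
    // unsigned input fits only if it is < 2^30 (top two bits clear).
    bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }

    int32_t SmiTag(uint32_t value) {
      assert(Uint32FitsInSmi(value));  // otherwise the generated code deopts
      return static_cast<int32_t>(value << 1);
    }

    int main() {
      assert(Uint32FitsInSmi(0x3FFFFFFFu));
      assert(!Uint32FitsInSmi(0x40000000u));
      return SmiTag(21);  // 42
    }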
@@ -5073,6 +5084,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -5087,7 +5099,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
@@ -5096,7 +5108,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5132,12 +5144,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+  // Done. Store the value in xmm_scratch into the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5181,7 +5193,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5308,7 +5320,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -5317,28 +5329,17 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
} else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (and hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
}
+
// Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
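The minus-zero deopt path needs more than the comparison above, because ucomisd reports -0.0 equal to +0.0; only zero falls through the not_zero jump, and the sign must then be read from the raw encoding. The distinction in C++:

    #include <cstdint>
    #include <cstring>

    // -0.0 == +0.0 under ucomisd, so minus zero is only recognizable by its
    // sign bit in the raw IEEE-754 encoding.
    bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return bits == 0x8000000000000000ull;
    }

    int main() {
      return (IsMinusZero(-0.0) && !IsMinusZero(0.0)) ? 0 : 1;
    }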
@@ -5347,6 +5348,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movsd(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5356,7 +5370,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// input register since we avoid dependencies.
__ mov(temp_reg, input_reg);
__ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(temp_reg));
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
@@ -5364,25 +5378,36 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
-
if (instr->truncating()) {
- Label heap_number, slow_case;
+ Label no_heap_number, check_bools, check_false;
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
+ __ j(not_equal, &no_heap_number, Label::kNear);
+ __ TruncateHeapNumberToI(input_reg, input_reg);
+ __ jmp(done);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
+ __ bind(&no_heap_number);
+  // Check for Oddballs. For truncating conversions, undefined and false
+  // convert to zero, and true converts to one.
__ cmp(input_reg, factory()->undefined_value());
+ __ j(not_equal, &check_bools, Label::kNear);
+ __ Set(input_reg, Immediate(0));
+ __ jmp(done);
+
+ __ bind(&check_bools);
+ __ cmp(input_reg, factory()->true_value());
+ __ j(not_equal, &check_false, Label::kNear);
+ __ Set(input_reg, Immediate(1));
+ __ jmp(done);
+
+ __ bind(&check_false);
+ __ cmp(input_reg, factory()->false_value());
__ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
+ __ Set(input_reg, Immediate(0));
__ jmp(done);
-
- __ bind(&heap_number);
- __ TruncateHeapNumberToI(input_reg, input_reg);
} else {
Label bailout;
XMMRegister scratch = (instr->temp() != NULL)
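The rewritten deferred path spells out the full truncation table for non-heap-number inputs: undefined and false truncate to 0, true to 1, and anything else deoptimizes. The same table as a plain function (Input is a demo enum, not V8's Oddball class):

    #include <stdexcept>

    enum class Input { kUndefined, kTrue, kFalse, kOther };

    // Truncation table implemented by the deferred code above.
    int TruncateToInt32(Input input) {
      switch (input) {
        case Input::kUndefined:
        case Input::kFalse: return 0;
        case Input::kTrue:  return 1;
        default: throw std::runtime_error("cannot truncate");  // deopt
      }
    }

    int main() {
      return TruncateToInt32(Input::kTrue) - 1;  // 0
    }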
@@ -5417,12 +5442,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -5487,7 +5516,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5514,7 +5544,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5594,7 +5625,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5649,22 +5680,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
@@ -5679,8 +5709,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
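ClampDoubleToUint8 now also takes the shared scratch register. Its contract, roughly: NaN and negative inputs clamp to 0, values above 255 clamp to 255, and in-range values are rounded — assumed here to be round-to-nearest-even, matching cvtsd2si's default on the SSE2 path. A hedged sketch of that contract:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Approximate contract of ClampDoubleToUint8; rounding mode assumed to
    // be round-to-nearest-even.
    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;      // catches NaN, zero and negatives
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(value));
    }

    int main() {
      std::printf("%d %d %d\n", ClampDoubleToUint8(-3.0),
                  ClampDoubleToUint8(254.5), ClampDoubleToUint8(300.0));
      return 0;
    }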
@@ -5696,6 +5727,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -5714,8 +5747,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -6146,14 +6179,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
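EnsureSpaceForLazyDeopt is generalized to take the required gap as a parameter; callers now pass Deoptimizer::patch_size(). The invariant is unchanged: consecutive lazy-deopt points must be far enough apart that a call can later be patched over each one without clobbering its neighbour. The padding arithmetic:

    #include <cassert>

    // Bytes of NOP padding needed so the current position sits at least
    // space_needed bytes past the previous lazy-deopt patch site.
    int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc,
                         int space_needed) {
      if (current_pc < last_lazy_deopt_pc + space_needed) {
        return last_lazy_deopt_pc + space_needed - current_pc;
      }
      return 0;
    }

    int main() {
      assert(LazyDeoptPadding(10, 8, 5) == 3);
      assert(LazyDeoptPadding(20, 8, 5) == 0);
      return 0;
    }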
@@ -6162,7 +6194,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -6233,7 +6265,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -6246,7 +6278,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 769917f7e2..78bc69de91 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -33,6 +33,7 @@
#include "checks.h"
#include "deoptimizer.h"
#include "ia32/lithium-gap-resolver-ia32.h"
+#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
@@ -45,45 +46,28 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
+ : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
- status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
@@ -129,12 +113,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
void X87Fxch(X87Register reg, int other_slot = 0) {
x87_stack_.Fxch(reg, other_slot);
}
+ void X87Free(X87Register reg) {
+ x87_stack_.Free(reg);
+ }
+
bool X87StackEmpty() {
return x87_stack_.depth() == 0;
@@ -188,27 +177,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
#undef DECLARE_DO
private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk()->graph(); }
- int GetNextEmittedBlock() const;
+ XMMRegister double_scratch0() const { return xmm0; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -220,14 +195,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
- void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
- bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateJumpTable();
bool GenerateSafepointTable();
@@ -251,7 +226,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int argc,
@@ -331,9 +307,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
+ void RecordAndWritePosition(int position) V8_OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -395,7 +370,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
int* offset,
AllocationSiteMode mode);
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -425,26 +400,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
void MakeSureStackPagesMapped(int offset);
#endif
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
- Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
- int last_lazy_deopt_pc_;
bool frame_is_built_;
class X87Stack {
@@ -505,8 +470,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index b5bc18bdc9..2b2126af9d 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -326,7 +326,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- __ movdbl(dst, Operand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
@@ -360,7 +360,7 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
}
} else {
// load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@ void LGapResolver::EmitMove(int index) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
// load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@ void LGapResolver::EmitSwap(int index) {
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movsd(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
@@ -499,12 +499,12 @@ void LGapResolver::EmitSwap(int index) {
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
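The gap resolver's double swaps keep relying on xmm0 as the one fixed scratch register. In the memory-to-memory case the destination slot is parked in xmm0, the source is copied over it in two 32-bit pieces through a general-purpose temp, and xmm0 is then stored back over the source. The same dance in C++:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Swap two stack slots holding doubles using one double scratch (xmm0)
    // and one 32-bit temp, mirroring the movsd/mov sequence above.
    void SwapDoubleSlots(double* src, double* dst) {
      double scratch = *dst;                       // movsd xmm0, dst0
      uint32_t tmp;
      std::memcpy(&tmp, src, sizeof tmp);          // mov tmp, src0
      std::memcpy(dst, &tmp, sizeof tmp);          // mov dst0, tmp
      std::memcpy(&tmp, reinterpret_cast<const char*>(src) + 4, sizeof tmp);
      std::memcpy(reinterpret_cast<char*>(dst) + 4, &tmp, sizeof tmp);
      *src = scratch;                              // movsd src0, xmm0
    }

    int main() {
      double a = 1.5, b = -2.25;
      SwapDoubleSlots(&a, &b);
      std::printf("%g %g\n", a, b);  // -2.25 1.5
      return 0;
    }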
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index ca1e60d644..fdddef3f47 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -386,9 +386,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
  // Skip a slot when allocating a double-width slot.
- if (is_double) {
+ if (kind == DOUBLE_REGISTERS) {
spill_slot_count_++;
spill_slot_count_ |= 1;
num_double_slots_++;
@@ -397,11 +397,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) {
}
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+ int index = GetNextSpillIndex(kind);
+ if (kind == DOUBLE_REGISTERS) {
return LDoubleStackSlot::Create(index, zone());
} else {
+ ASSERT(kind == GENERAL_REGISTERS);
return LStackSlot::Create(index, zone());
}
}
@@ -479,7 +480,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
ASSERT_EQ(alignment_state_index, 0);
USE(alignment_state_index);
}
@@ -488,7 +489,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// which will be subsumed into this frame.
if (graph()->has_osr()) {
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(false);
+ chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
}
}
@@ -560,29 +561,34 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
}
+static inline bool CanBeImmediateConstant(HValue* value) {
+ return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
+ return CanBeImmediateConstant(value)
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
@@ -707,7 +713,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+ instr->set_pointer_map(new(zone()) LPointerMap(zone()));
return instr;
}
@@ -762,52 +768,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
+ } else {
+ right = UseFixed(right_value, ecx);
}
- } else {
- right = UseFixed(right_value, ecx);
- }
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -816,21 +814,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -914,10 +913,31 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
+
+ LInstruction* instr = NULL;
+ if (current->CanReplaceWithDummyUses()) {
+ HValue* first_operand = current->OperandCount() == 0
+ ? graph()->GetConstant1()
+ : current->OperandAt(0);
+ instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+ for (int i = 1; i < current->OperandCount(); ++i) {
+ LInstruction* dummy =
+ new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+ dummy->set_hydrogen_value(current);
+ chunk_->AddInstruction(dummy, current_block_);
+ }
+ } else {
+ instr = current->CompileToLithium(this);
+ }
+
+ argument_count_ += current->argument_delta();
+ ASSERT(argument_count_ >= 0);
if (instr != NULL) {
+ // Associate the hydrogen instruction first, since we may need it for
+ // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+ instr->set_hydrogen_value(current);
+
#if DEBUG
// Make sure that the lithium instruction has either no fixed register
// constraints in temps or the result OR no uses that are only used at
@@ -947,7 +967,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -964,7 +983,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
clobber->set_hydrogen_value(current);
chunk_->AddInstruction(clobber, current_block_);
}
- instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
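VisitInstruction now centralizes argument-stack accounting: instead of every call builder decrementing argument_count_ by hand (the deleted lines in the hunks below), each hydrogen instruction reports a delta — assumed here to be +1 for a push and minus the argument count for a call that consumes its arguments — applied once with a non-negativity check:

    #include <cassert>

    // Centralized accounting: each instruction reports how it changes the
    // pending-arguments count (assumed: +1 per push, -N for a call taking N).
    int ApplyArgumentDelta(int argument_count, int delta) {
      argument_count += delta;
      assert(argument_count >= 0);  // pops may never exceed pushes
      return argument_count;
    }

    int main() {
      int count = 0;
      count = ApplyArgumentDelta(count, +1);  // push receiver
      count = ApplyArgumentDelta(count, +1);  // push argument
      count = ApplyArgumentDelta(count, -2);  // call consumes both
      return count;  // 0
    }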
@@ -1061,21 +1079,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor);
- }
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
ToBooleanStub::Types expected = instr->expected_input_types();
// Tagged values that are not known smis or booleans require a
// deoptimization environment. If the instruction is generic no
// environment is needed since all cases are handled.
+ HValue* value = instr->value();
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
@@ -1141,12 +1153,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1171,7 +1177,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
LOperand* argument = UseAny(instr->argument());
return new(zone()) LPushArgument(argument);
}
@@ -1238,7 +1243,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
}
@@ -1246,7 +1250,6 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -1356,7 +1359,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
LCallKeyed* result = new(zone()) LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1364,7 +1366,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallNamed* result = new(zone()) LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1372,14 +1373,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallGlobal* result = new(zone()) LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
}
@@ -1387,7 +1386,6 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNew* result = new(zone()) LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1396,7 +1394,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1405,14 +1402,12 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
LCallFunction* result = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
}
@@ -1442,29 +1437,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1481,8 +1466,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1584,17 +1570,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1618,7 +1597,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1639,7 +1617,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1671,7 +1648,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1752,9 +1728,12 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
ASSERT(instr->right()->representation().IsDouble());
LOperand* left;
LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
+ if (CanBeImmediateConstant(instr->left()) &&
+ CanBeImmediateConstant(instr->right())) {
+ // The code generator requires either both inputs to be constant
+ // operands, or neither.
+ left = UseConstant(instr->left());
+ right = UseConstant(instr->right());
} else {
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
@@ -1766,6 +1745,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
+ LInstruction* goto_instr = CheckElideControlInstruction(instr);
+ if (goto_instr != NULL) return goto_instr;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCmpObjectEqAndBranch(left, right);
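CheckElideControlInstruction, declared further down in lithium-ia32.h, lets the builder skip materializing a compare whose outcome is already statically known. A minimal sketch of such a helper, assuming HControlInstruction exposes a KnownSuccessorBlock(HBasicBlock**) query (an assumption, not quoted from this patch):

    LInstruction* LChunkBuilder::CheckElideControlInstruction(
        HControlInstruction* instr) {
      HBasicBlock* successor;
      // Outcome not statically known: build the branch as usual.
      if (!instr->KnownSuccessorBlock(&successor)) return NULL;
      // Otherwise an unconditional goto to the known successor suffices.
      return new(zone()) LGoto(successor);
    }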
@@ -1774,8 +1755,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1909,6 +1890,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
}
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+ // The control instruction marking the end of a block that completed
+ // abruptly (e.g., threw an exception). There is nothing specific to do.
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
@@ -1944,7 +1932,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
// building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = TempRegister();
@@ -2015,8 +2002,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ LInstruction* result = val->CheckFlag(HInstruction::kUint32)
+ ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
+ : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
@@ -2050,12 +2038,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2234,6 +2216,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2435,7 +2422,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
!(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
- if (needs_write_barrier) {
+ if (instr->field_representation().IsByte()) {
+ // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+ // Just force the value to be in eax and we're safe here.
+ val = UseFixed(instr->value(), eax);
+ } else if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
@@ -2582,7 +2573,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
LCallStub* result = new(zone()) LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2711,7 +2701,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
+ ASSERT(instr->argument_delta() == -argument_count);
}
HEnvironment* outer = current_block_->last_environment()->
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 3a609c991a..752fdd4f6a 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -107,7 +107,6 @@ class LCodeGen;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
@@ -116,7 +115,6 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
@@ -130,6 +128,7 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
@@ -184,6 +183,7 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
+ V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(ValueOf) \
V(WrapReceiver)
@@ -215,7 +215,6 @@ class LInstruction : public ZoneObject {
: environment_(NULL),
hydrogen_value_(NULL),
bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
}
virtual ~LInstruction() {}
@@ -256,15 +255,6 @@ class LInstruction : public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
@@ -310,7 +300,6 @@ class LInstruction : public ZoneObject {
virtual LOperand* TempAt(int i) = 0;
class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
@@ -922,19 +911,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1144,19 +1120,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1309,7 +1272,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1605,6 +1568,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1634,11 +1606,6 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -2061,8 +2028,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ return save_doubles() == kDontSaveFPRegs;
+ }
+
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
+ SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
};
@@ -2105,6 +2077,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LUint32ToSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
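For context: an ia32 smi has a 31-bit payload, so a non-negative uint32 fits only when it is at most 2^30 - 1, i.e. when its top two bits are clear. A host-side sketch of that range check (the 0xC0000000 mask mirrors the usual smi layout and is an illustration, not a quote of the codegen):

    #include <cstdint>
    #include <cstdio>

    // A uint32 fits in an ia32 smi iff value <= 0x3FFFFFFF.
    static bool FitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }

    int main() {
      printf("%d %d\n", FitsInSmi(0x3FFFFFFFu), FitsInSmi(0x40000000u));
      return 0;  // prints "1 0"
    }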
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -2189,7 +2174,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2364,8 +2349,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2515,12 +2502,13 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
+ LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2742,8 +2730,8 @@ class LPlatformChunk V8_FINAL : public LChunk {
: LChunk(info, graph),
num_double_slots_(0) { }
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
+ int GetNextSpillIndex(RegisterKind kind);
+ LOperand* GetNextSpillSlot(RegisterKind kind);
int num_double_slots() const { return num_double_slots_; }
@@ -2765,13 +2753,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
+ LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
+
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -2907,7 +2896,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
@@ -2921,7 +2910,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED {
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
- int position_;
LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index b65d328435..025bd891c2 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
+#include "isolate-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -232,7 +233,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
@@ -253,8 +254,8 @@ void MacroAssembler::X87TOSToI(Register result_reg,
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fist_s(MemOperand(esp, 0));
fld(0);
+ fist_s(MemOperand(esp, 0));
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
@@ -283,7 +284,7 @@ void MacroAssembler::DoubleToI(Register result_reg,
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -344,7 +345,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
@@ -361,7 +362,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
@@ -390,9 +391,9 @@ void MacroAssembler::TaggedToI(Register result_reg,
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -445,25 +446,36 @@ void MacroAssembler::TaggedToI(Register result_reg,
}
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
Label done;
cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
}
+void MacroAssembler::LoadUint32NoSSE2(Register src) {
+ Label done;
+ push(src);
+ fild_s(Operand(esp, 0));
+ cmp(src, Immediate(0));
+ j(not_sign, &done, Label::kNear);
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias();
+ fld_d(Operand::StaticVariable(uint32_bias));
+ faddp(1);
+ bind(&done);
+ add(esp, Immediate(kPointerSize));
+}
+
+
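The bias trick above is easiest to see host-side: cvtsi2sd interprets its input as signed, so when the sign bit is set the result is exactly 2^32 too small, and adding the stored uint32 bias corrects it. A minimal C++ sketch of the same arithmetic (illustration only, not V8 code):

    #include <cstdint>
    #include <cstdio>

    double LoadUint32(uint32_t src) {
      // cvtsi2sd: signed interpretation of the 32-bit input.
      double d = static_cast<double>(static_cast<int32_t>(src));
      // addsd with the 2^32 bias when the sign bit was set.
      if (static_cast<int32_t>(src) < 0) d += 4294967296.0;
      return d;
    }

    int main() {
      printf("%.0f\n", LoadUint32(0xFFFFFFFFu));  // prints 4294967295
      return 0;
    }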
void MacroAssembler::RecordWriteArray(Register object,
Register value,
Register index,
@@ -676,6 +688,12 @@ void MacroAssembler::DebugBreak() {
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
@@ -799,9 +817,9 @@ void MacroAssembler::StoreNumberToDoubleElements(
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -821,7 +839,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
@@ -834,8 +852,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ Cvtsi2sd(scratch2, scratch1);
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
@@ -996,6 +1014,30 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+ if (frame_mode == BUILD_STUB_FRAME) {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ PredictableCodeSizeScope predictable_code_size_scope(this,

+ kNoCodeAgeSequenceLength);
+ if (isolate()->IsCodePreAgingActive()) {
+ // Pre-age the code.
+ call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength);
+ } else {
+ push(ebp); // Caller's frame pointer.
+ mov(ebp, esp);
+ push(esi); // Callee's context.
+ push(edi); // Callee's JS function.
+ }
+ }
+}
+
+
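Both prologue forms occupy the same number of bytes by construction, which is what allows the collector to patch one over the other in place later. On ia32 the budget works out as follows (standard encodings; the Nop(...) above degenerates to zero bytes because call rel32 is already five bytes):

    young prologue                     pre-aged prologue
      push ebp        ; 1 byte          call <MarkCodeAsExecutedOnce stub>
      mov ebp, esp    ; 2 bytes         ; E8 + rel32 = 5 bytes total
      push esi        ; 1 byte
      push edi        ; 1 byte
                      ; 5 bytes total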
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
@@ -1051,7 +1093,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
@@ -1095,7 +1137,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
@@ -1109,14 +1151,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1172,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -2141,23 +2185,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
}
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
+ int num_arguments,
+ SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
@@ -2172,7 +2202,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
// smarter.
Set(eax, Immediate(num_arguments));
mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -2221,11 +2252,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2314,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2337,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2374,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -3003,6 +3046,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
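The heap-number hash above can be mirrored host-side: it is the xor of the double's two 32-bit halves, masked by half the cache length minus one, since each entry occupies a (number, string) pair of slots. A sketch (illustration only; Heap::GetNumberStringCache is the authoritative version):

    #include <cstdint>
    #include <cstring>

    int NumberStringCacheIndex(double value, int cache_length) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t hash = static_cast<uint32_t>(bits) ^
                      static_cast<uint32_t>(bits >> 32);
      int mask = (cache_length / 2) - 1;  // two slots per entry
      // The entry's number slot lives at index * 2 in the FixedArray,
      // hence the times_twice_pointer_size scaling above.
      return hash & mask;
    }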
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -3408,9 +3533,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
- Register scratch_reg) {
- Label no_memento_available;
-
+ Register scratch_reg,
+ Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
@@ -3419,12 +3543,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_memento_available);
+ j(less, no_memento_found);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_memento_available);
+ j(greater, no_memento_found);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
- bind(&no_memento_available);
+ Immediate(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e4e4533bf5..30f8a8dfbb 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -225,6 +225,9 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
+ // Generates function and stub prologue code.
+ void Prologue(PrologueFrameMode frame_mode);
+
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@@ -240,7 +243,7 @@ class MacroAssembler: public Assembler {
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -366,6 +369,12 @@ class MacroAssembler: public Assembler {
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+ // register, which hinders register renaming and lengthens dependence
+ // chains. We therefore use xorps to clear the dst register before
+ // cvtsi2sd.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
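Concretely, each use of the wrapper emits two instructions rather than a bare conversion (illustrative sequence):

    xorps xmm1, xmm1      ; clear all 128 bits, breaking the false dependence
    cvtsi2sd xmm1, eax    ; convert; writes only the low 64 bits of xmm1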
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
@@ -509,6 +518,7 @@ class MacroAssembler: public Assembler {
}
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
+ void LoadUint32NoSSE2(Register src);
// Jump if the register contains a smi.

inline void JumpIfSmi(Register value,
@@ -754,11 +764,18 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+ void CallRuntime(const Runtime::Function* f,
+ int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, kSaveFPRegs);
+ }
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ void CallRuntime(Runtime::FunctionId id, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(id), num_arguments);
+ }
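A hypothetical call site (a sketch; the runtime function ids are assumptions) showing the difference between the two variants:

    // Doubles may be clobbered across this call:
    __ CallRuntime(Runtime::kStackGuard, 0);
    // Doubles preserved (silently degrades to kDontSaveFPRegs when SSE2
    // is unavailable, per the CallRuntime definition in the .cc file):
    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);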
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
@@ -807,7 +824,8 @@ class MacroAssembler: public Assembler {
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- int return_value_offset_from_ebp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -890,6 +908,17 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number
+ // in the object register is found in the cache, the generated code falls
+ // through with the result in the result register. The object and result
+ // registers can be the same. If the number is not found in the cache, the
+ // code jumps to the not_found label; only the contents of the object
+ // register are guaranteed to be unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// Check whether the instance type represents a flat ASCII string. Jump to
// the label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
@@ -931,9 +960,20 @@ class MacroAssembler: public Assembler {
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
+ // If allocation info is present, conditional code is set to equal.
void TestJSArrayForAllocationMemento(Register receiver_reg,
- Register scratch_reg);
+ Register scratch_reg,
+ Label* no_memento_found);
+
+ void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+ Register scratch_reg,
+ Label* memento_found) {
+ Label no_memento_found;
+ TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+ &no_memento_found);
+ j(equal, memento_found);
+ bind(&no_memento_found);
+ }
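A hypothetical caller (sketch), e.g. an elements-transition path that must take the slow route while the array still carries its allocation memento:

    Label fail;
    masm->JumpIfJSArrayHasAllocationMemento(edx, edi, &fail);
    // ... fast-path transition ...
    masm->bind(&fail);  // memento present: go slow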
private:
bool generating_stub_;
@@ -957,7 +997,7 @@ class MacroAssembler: public Assembler {
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 354c2fdcb0..9786cffe86 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -329,32 +329,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -462,50 +458,50 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : ReturnValue default value
- // -- esp[24] : ReturnValue
- // -- esp[28] : last argument
+ // -- esp[4] - esp[28] : FunctionCallbackInfo, incl.
+ // : object passing the type check
+ // (set by CheckPrototypes)
+ // -- esp[32] : last argument
// -- ...
- // -- esp[(argc + 6) * 4] : first argument
- // -- esp[(argc + 7) * 4] : receiver
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
// -----------------------------------
+
+ typedef FunctionCallbackArguments FCA;
+ // Save calling context.
+ __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ // Construct the FunctionCallbackInfo.
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize),
+ Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize),
Immediate(reinterpret_cast<int>(masm->isolate())));
- __ mov(Operand(esp, 5 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize),
masm->isolate()->factory()->undefined_value());
- __ mov(Operand(esp, 6 * kPointerSize),
+ __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize),
masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
- __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
-
+ STATIC_ASSERT(kFastApiCallArguments == 7);
+ __ lea(eax, Operand(esp, 1 * kPointerSize));
// The API function gets a reference to the v8::Arguments. If the CPU
// profiler is enabled, a wrapper function will be called and we need to pass
@@ -521,14 +517,14 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
- // v8::Arguments::implicit_args_.
+ // FunctionCallbackInfo::implicit_args_.
__ mov(ApiParameterOperand(2), eax);
- __ add(eax, Immediate(argc * kPointerSize));
- // v8::Arguments::values_.
+ __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize));
+ // FunctionCallbackInfo::values_.
__ mov(ApiParameterOperand(3), eax);
- // v8::Arguments::length_.
+ // FunctionCallbackInfo::length_.
__ Set(ApiParameterOperand(4), Immediate(argc));
- // v8::Arguments::is_construct_call_.
+ // FunctionCallbackInfo::is_construct_call_.
__ Set(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
@@ -537,11 +533,17 @@ static void GenerateFastApiCall(MacroAssembler* masm,
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(ebp,
+ (2 + FCA::kContextSaveIndex) * kPointerSize);
+ Operand return_value_operand(ebp,
+ (2 + FCA::kReturnValueOffset) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
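Taken together, the stores above lay out the FunctionCallbackInfo block one slot above the return address. Assuming indices consistent with the STATIC_ASSERT(kFastApiCallArguments == 7) above (an assumption, not quoted from v8.h), the frame looks roughly like:

    esp[0]   return address
    esp[4]   holder              (FCA::kHolderIndex, set by CheckPrototypes)
    esp[8]   isolate             (FCA::kIsolateIndex)
    esp[12]  ReturnValue default (FCA::kReturnValueDefaultValueIndex)
    esp[16]  ReturnValue         (FCA::kReturnValueOffset)
    esp[20]  call data           (FCA::kDataIndex)
    esp[24]  callee              (FCA::kCalleeIndex)
    esp[28]  context save        (FCA::kContextSaveIndex)
    esp[32]  last argument       (start of the JS arguments)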
@@ -556,6 +558,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
// Copy return value.
__ mov(scratch, Operand(esp, 0));
// Assign stack space for the call arguments.
@@ -563,7 +566,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Move the return address on top of the stack.
__ mov(Operand(esp, 0), scratch);
// Write holder to stack frame.
- __ mov(Operand(esp, 1 * kPointerSize), receiver);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
// Write receiver to stack frame.
int index = stack_space;
__ mov(Operand(esp, index-- * kPointerSize), receiver);
@@ -574,7 +577,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(Operand(esp, index-- * kPointerSize), values[i]);
}
- GenerateFastApiCall(masm, optimization, argc);
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -688,7 +691,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -776,9 +779,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
- Label* label,
- Handle<Name> name) {
+void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
+ Label* label,
+ Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Immediate(name));
@@ -809,7 +812,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+void StoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
@@ -827,19 +830,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
// store is successful.
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register storage_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* slow) {
+void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
@@ -862,7 +865,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -876,7 +879,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
@@ -884,7 +887,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
@@ -998,15 +1001,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
+void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1041,7 +1044,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1054,14 +1057,14 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
@@ -1160,6 +1163,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Handle<Map>(object->map()));
@@ -1176,7 +1180,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int depth = 0;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Traverse the prototype chain and check the maps in the prototype chain for
@@ -1237,7 +1241,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1269,9 +1273,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
__ bind(miss);
@@ -1280,9 +1284,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
- Label* success,
- Label* miss) {
+void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
GenerateRestoreName(masm(), miss, name);
@@ -1291,7 +1295,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
}
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+Register LoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1351,7 +1355,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
}
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+void LoadStubCompiler::NonexistentHandlerFrontend(
Handle<JSObject> object,
Handle<JSObject> last,
Handle<Name> name,
@@ -1371,10 +1375,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
}
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex field,
- Representation representation) {
+void LoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex field,
+ Representation representation) {
if (!reg.is(receiver())) __ mov(receiver(), reg);
if (kind() == Code::LOAD_IC) {
LoadFieldStub stub(field.is_inobject(holder),
@@ -1390,34 +1394,32 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization) {
GenerateFastApiCall(
masm(), call_optimization, receiver(), scratch3(), 0, NULL);
}
-void BaseLoadStubCompiler::GenerateLoadCallback(
+void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
ASSERT(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
__ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
// Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- Register scratch = reg.is(scratch1()) ? receiver() : scratch1();
- __ mov(scratch, Immediate(callback));
- __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset));
+ ASSERT(!scratch2().is(reg));
+ __ mov(scratch2(), Immediate(callback));
+ __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
} else {
__ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
@@ -1427,9 +1429,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
+ // Save a pointer to where we pushed the arguments. This will be
+ // passed as the const PropertyCallbackInfo& to the C++ callback.
+ __ push(esp);
__ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
@@ -1460,18 +1462,19 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
thunk_address,
ApiParameterOperand(2),
kStackSpace,
- 7);
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
+void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
__ ret(0);
}
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
+void LoadStubCompiler::GenerateLoadInterceptor(
Register holder_reg,
Handle<JSObject> object,
Handle<JSObject> interceptor_holder,
@@ -2394,7 +2397,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
@@ -2444,7 +2447,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
@@ -2623,7 +2626,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
@@ -3111,18 +3114,14 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
+ Register receiver,
Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
+ __ push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,