Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc              |   15
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h               |   31
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc               |    1
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc             |  843
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h              |   89
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc                |   12
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc                  |    1
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc            |   20
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc                 |    1
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc           |    8
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc                     |   28
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc        |  685
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h         |   40
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc   |   33
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc                |  135
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h                 |  114
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc        |   30
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h         |    8
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc |    1
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc             |  220
20 files changed, 939 insertions(+), 1376 deletions(-)
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 7bb643a16..e0ae00665 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -65,7 +65,7 @@ int IntelDoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumAllocatableRegisters;
} else {
- return X87TopOfStackRegister::kNumAllocatableRegisters;
+ return X87Register::kNumAllocatableRegisters;
}
}
@@ -74,7 +74,7 @@ int IntelDoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumRegisters;
} else {
- return X87TopOfStackRegister::kNumRegisters;
+ return X87Register::kNumRegisters;
}
}
@@ -83,7 +83,7 @@ const char* IntelDoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::AllocationIndexToString(index);
} else {
- return X87TopOfStackRegister::AllocationIndexToString(index);
+ return X87Register::AllocationIndexToString(index);
}
}
@@ -1055,6 +1055,7 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
+
void Assembler::ror(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
@@ -1068,6 +1069,7 @@ void Assembler::ror(Register dst, uint8_t imm8) {
}
}
+
void Assembler::ror_cl(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
@@ -1782,6 +1784,12 @@ void Assembler::fisub_s(const Operand& adr) {
}
+void Assembler::fmul_i(int i) {
+ EnsureSpace ensure_space(this);
+ emit_farith(0xD8, 0xC8, i);
+}
+
+
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
@@ -2137,6 +2145,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
+
void Assembler::movmskpd(Register dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
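For reference, both fmul variants funnel through the assembler's two-byte x87 arithmetic encoding: 0xD8 0xC8+i encodes fmul st(0), st(i), while 0xDC 0xC8+i encodes fmul st(i), st(0). A minimal sketch of the emit_farith helper this relies on (assumed shape, not necessarily the exact V8 implementation):

    // Emits a two-byte x87 arithmetic instruction; the second byte carries
    // the FPU stack index in its low three bits.
    void Assembler::emit_farith(int b1, int b2, int i) {
      ASSERT(is_uint8(b1) && is_uint8(b2));  // otherwise not a valid opcode
      ASSERT(0 <= i && i < 8);               // the x87 stack has eight slots
      EMIT(b1);
      EMIT(b2 + i);  // e.g. 0xD8, 0xC8 + i  =>  fmul st(0), st(i)
    }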
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 353f265ab..8380897f6 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -229,30 +229,40 @@ struct XMMRegister : IntelDoubleRegister {
#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-struct X87TopOfStackRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 1;
- static const int kNumRegisters = 1;
+struct X87Register : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 5;
+ static const int kNumRegisters = 5;
- bool is(X87TopOfStackRegister reg) const {
+ bool is(X87Register reg) const {
return code_ == reg.code_;
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
- "st0",
+ "stX_0", "stX_1", "stX_2", "stX_3", "stX_4"
};
return names[index];
}
- static int ToAllocationIndex(X87TopOfStackRegister reg) {
- ASSERT(reg.code() == 0);
- return 0;
+ static X87Register FromAllocationIndex(int index) {
+ STATIC_ASSERT(sizeof(X87Register) == sizeof(IntelDoubleRegister));
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ X87Register result;
+ result.code_ = index;
+ return result;
+ }
+
+ static int ToAllocationIndex(X87Register reg) {
+ return reg.code_;
}
};
-#define x87tos \
- static_cast<const X87TopOfStackRegister&>(double_register_0)
+#define stX_0 static_cast<const X87Register&>(double_register_0)
+#define stX_1 static_cast<const X87Register&>(double_register_1)
+#define stX_2 static_cast<const X87Register&>(double_register_2)
+#define stX_3 static_cast<const X87Register&>(double_register_3)
+#define stX_4 static_cast<const X87Register&>(double_register_4)
typedef IntelDoubleRegister DoubleRegister;
@@ -947,6 +957,7 @@ class Assembler : public AssemblerBase {
void fadd(int i);
void fsub(int i);
void fmul(int i);
+ void fmul_i(int i);
void fdiv(int i);
void fisub_s(const Operand& adr);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 8aa6e4a60..b90a17f6c 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -1114,7 +1114,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
ebx, // Result.
ecx, // Scratch 1.
edx, // Scratch 2.
- false, // Input is known to be smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 29a4be214..548cbaace 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -65,6 +65,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { ebx };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -230,6 +240,39 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx, eax };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -619,462 +662,155 @@ class FloatingPointHelper : public AllStatic {
};
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // If the exponent is above 83, the number contains no significant
- // bits in the range 0..2^31, so the result is zero.
- static const uint32_t kResultIsZeroExponent = 83;
- __ cmp(scratch2, Immediate(kResultIsZeroExponent));
- __ j(above, &done);
- if (use_sse3) {
- CpuFeatureScope scope(masm, SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent = 63;
- __ cmp(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent = 30;
- __ cmp(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent, Label::kNear);
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+ Register input_reg = this->source();
+ Register final_result_reg = this->destination();
+ ASSERT(is_truncating());
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent = 31;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
- __ j(positive, &done, Label::kNear);
- __ neg(ecx);
- __ jmp(&done, Label::kNear);
- }
+ Label check_negative, process_64_bits, done, done_no_stash;
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent in scratch2. Zero in ecx.
- // We know that 0 <= exponent < 30.
- __ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits int he low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, ecx);
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative, Label::kNear);
- __ mov(ecx, scratch2);
- __ jmp(&done, Label::kNear);
- __ bind(&negative);
- __ sub(ecx, scratch2);
- }
- __ bind(&done);
-}
+ int double_offset = offset();
+ // Account for return address and saved regs if input is esp.
+ if (input_reg.is(esp)) double_offset += 3 * kPointerSize;
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
+ MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
+ MemOperand exponent_operand(MemOperand(input_reg,
+ double_offset + kDoubleSize / 2));
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
+ Register scratch1;
+ {
+ Register scratch_candidates[3] = { ebx, edx, edi };
+ for (int i = 0; i < 3; i++) {
+ scratch1 = scratch_candidates[i];
+ if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
+ }
}
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
-
- __ push(eax); // the operand
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(mode_)));
- __ push(Immediate(Smi::FromInt(operand_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // Since we must use ecx for shifts below, use some other register (eax)
+ // to calculate the result if ecx is the requested return register.
+ Register result_reg = final_result_reg.is(ecx) ? eax : final_result_reg;
+ // Save ecx if it isn't the return register (and is therefore volatile), or
+ // if it is the return register, in which case save the temp register we use
+ // in its stead for the result.
+ Register save_reg = final_result_reg.is(ecx) ? eax : ecx;
+ __ push(scratch1);
+ __ push(save_reg);
+
+ bool stash_exponent_copy = !input_reg.is(esp);
+ __ mov(scratch1, mantissa_operand);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ // Load x87 register with heap number.
+ __ fld_d(mantissa_operand);
}
-}
+ __ mov(ecx, exponent_operand);
+ if (stash_exponent_copy) __ push(ecx);
+ __ and_(ecx, HeapNumber::kExponentMask);
+ __ shr(ecx, HeapNumber::kExponentShift);
+ __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
+ __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
+ __ j(below, &process_64_bits);
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
- Label::kNear, Label::kNear, Label::kNear);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance undo_near,
- Label::Distance slow_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
- __ j(zero, slow, slow_near);
-
- // Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
- __ Set(eax, Immediate(0));
- __ sub(eax, edx);
- __ j(overflow, undo, undo_near);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(
- MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // Flip bits and revert inverted smi-tag.
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // Result is entirely in the lower 32 bits of the mantissa.
+ int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
+ if (CpuFeatures::IsSupported(SSE3)) {
+ __ fstp(0);
}
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
+ __ sub(ecx, Immediate(delta));
+ __ xor_(result_reg, result_reg);
+ __ cmp(ecx, Immediate(31));
+ __ j(above, &done);
+ __ shl_cl(scratch1);
+ __ jmp(&check_negative);
- if (mode_ == UNARY_OVERWRITE) {
- __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
- Immediate(HeapNumber::kSignMask)); // Flip sign.
+ __ bind(&process_64_bits);
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatureScope scope(masm, SSE3);
+ if (stash_exponent_copy) {
+ // Already a copy of the exponent on the stack, overwrite it.
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ __ sub(esp, Immediate(kDoubleSize / 2));
+ } else {
+ // Reserve space for 64 bit answer.
+ __ sub(esp, Immediate(kDoubleSize)); // Nolint.
+ }
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done_no_stash);
} else {
- __ mov(edx, eax);
- // edx: operand
-
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated, Label::kNear);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
+ // Result must be extracted from the shifted 32-bit mantissa.
+ __ sub(ecx, Immediate(delta));
+ __ neg(ecx);
+ if (stash_exponent_copy) {
+ __ mov(result_reg, MemOperand(esp, 0));
+ } else {
+ __ mov(result_reg, exponent_operand);
}
-
- __ bind(&heapnumber_allocated);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, Label::kNear);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ ret(0);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ mov(ebx, eax);
- __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
+ __ and_(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kSignificandMask >> 32)));
+ __ add(result_reg,
+ Immediate(static_cast<uint32_t>(Double::kHiddenBit >> 32)));
+ __ shrd(result_reg, scratch1);
+ __ shr_cl(result_reg);
+ __ test(ecx, Immediate(32));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(not_equal, scratch1, result_reg);
+ } else {
+ Label skip_mov;
+ __ j(equal, &skip_mov, Label::kNear);
+ __ mov(scratch1, result_reg);
+ __ bind(&skip_mov);
}
- // IntegerConvert uses ebx and edi as scratch registers.
- // This conversion won't go slow-case.
- IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
- __ not_(ecx);
+ }
- __ bind(&heapnumber_allocated);
+ // If the double was negative, negate the integer result.
+ __ bind(&check_negative);
+ __ mov(result_reg, scratch1);
+ __ neg(result_reg);
+ if (stash_exponent_copy) {
+ __ cmp(MemOperand(esp, 0), Immediate(0));
+ } else {
+ __ cmp(exponent_operand, Immediate(0));
}
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ecx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatureScope use_cmov(masm, CMOV);
+ __ cmov(greater, result_reg, scratch1);
} else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ Label skip_mov;
+ __ j(less_equal, &skip_mov, Label::kNear);
+ __ mov(result_reg, scratch1);
+ __ bind(&skip_mov);
}
- __ ret(0);
-}
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
+ // Restore registers
+ __ bind(&done);
+ if (stash_exponent_copy) {
+ __ add(esp, Immediate(kDoubleSize / 2));
}
+ __ bind(&done_no_stash);
+ if (!final_result_reg.is(result_reg)) {
+ ASSERT(final_result_reg.is(ecx));
+ __ mov(final_result_reg, result_reg);
+ }
+ __ pop(save_reg);
+ __ pop(scratch1);
+ __ ret(0);
}
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the corresponding JavaScript builtin.
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
+// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
+// |conversion_failure| if the heap number did not contain an int32 value.
+// Result is in ecx. Trashes ebx, xmm0, and xmm1.
+static void ConvertHeapNumberToInt32(MacroAssembler* masm,
+ Register source,
+ Label* conversion_failure) {
+ __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, conversion_failure, xmm0, ecx, ebx, xmm1);
}
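The new DoubleToIStub performs the same job the deleted IntegerConvert did: truncate an IEEE-754 double to a 32-bit integer by manipulating its bit pattern instead of using FPU conversion instructions. A portable C++ sketch of the algorithm (illustrative only, assuming IEEE-754 doubles; not the stub's actual code):

    #include <stdint.h>
    #include <string.h>

    // Split the double into sign, exponent, and significand, shift the
    // significand to its integer position, keep the low 32 bits, and
    // apply the sign.
    int32_t TruncateDoubleToInt32(double input) {
      uint64_t bits;
      memcpy(&bits, &input, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1075;
      uint64_t significand = (bits & 0xFFFFFFFFFFFFFULL) | (1ULL << 52);
      uint64_t magnitude64;
      if (exponent <= -53 || exponent >= 32) {
        magnitude64 = 0;  // |x| < 1 truncates to 0; huge x has zero low bits
      } else if (exponent < 0) {
        magnitude64 = significand >> -exponent;
      } else {
        magnitude64 = significand << exponent;  // only the low 32 bits matter
      }
      uint32_t magnitude = static_cast<uint32_t>(magnitude64);
      if (bits >> 63) magnitude = 0u - magnitude;  // wrap-around negation
      return static_cast<int32_t>(magnitude);
    }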
@@ -1617,8 +1353,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@@ -2263,8 +1999,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@@ -2274,8 +2010,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
- StringAddStub string_add_right_stub((StringAddFlags)
- (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@@ -2683,7 +2419,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, edx, conversion_failure);
} else {
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ DoubleToIStub stub(edx, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ mov(edx, ecx);
@@ -2718,7 +2456,9 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(
CpuFeatureScope use_sse2(masm, SSE2);
ConvertHeapNumberToInt32(masm, eax, conversion_failure);
} else {
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ DoubleToIStub stub(eax, ecx, HeapNumber::kValueOffset - kHeapObjectTag,
+ true);
+ __ call(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&done);
@@ -3197,7 +2937,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3217,7 +2958,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
support_wrapper_);
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -3281,7 +3023,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
- StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+ StubCompiler::TailCallBuiltin(
+ masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@@ -4229,7 +3972,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@@ -4254,52 +3996,46 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Heap::GetNumberStringCache.
Label smi_hash_calculated;
Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ __ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ __ mov(scratch, object);
+ __ SmiUntag(scratch);
+ __ jmp(&smi_hash_calculated, Label::kNear);
+ __ bind(&not_smi);
+ __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ __ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ __ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(masm, SSE2);
+ __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm1);
} else {
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
+ __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ __ FCmp();
}
+ __ j(parity_even, not_found); // Bail out if NaN is involved.
+ __ j(not_equal, not_found); // The cache did not contain this value.
+ __ jmp(&load_result_from_cache, Label::kNear);
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
__ and_(scratch, mask);
- Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
FieldOperand(number_string_cache,
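The restructured lookup computes the cache index the same way on both paths: smis hash as their untagged value, heap numbers as the XOR of the two 32-bit halves of the double (the mov/xor pair on kValueOffset above). A C++ sketch of the index computation, where mask is the cache capacity minus one (illustrative):

    #include <stdint.h>
    #include <string.h>

    // Smi path: the untagged integer, masked to the cache size.
    uint32_t SmiCacheIndex(int32_t value, uint32_t mask) {
      return static_cast<uint32_t>(value) & mask;
    }

    // Heap-number path: XOR the low and high words of the IEEE-754 bits.
    uint32_t HeapNumberCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      uint32_t low = static_cast<uint32_t>(bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      return (low ^ high) & mask;
    }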
@@ -4326,7 +4062,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ mov(ebx, Operand(esp, kPointerSize));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+ GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
@@ -4369,9 +4105,9 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
__ JumpIfSmi(object, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsInternalizedMask | kIsNotStringMask);
- __ cmp(scratch, kInternalizedTag | kStringTag);
- __ j(not_equal, label);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ test(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, label);
}
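This works because the instance-type encoding now gives both kStringTag and kInternalizedTag the value zero, so "internalized string" is the absence of two bits and a single test instruction suffices. A sketch of the predicate, with bit positions assumed for illustration:

    #include <stdint.h>

    // Assumed layout: the not-string and not-internalized bits are set for
    // everything that is not an internalized string.
    const uint32_t kIsNotStringMask = 1u << 7;        // hypothetical positions
    const uint32_t kIsNotInternalizedMask = 1u << 6;

    bool IsInternalizedString(uint32_t instance_type) {
      // Replaces the old two-step and/cmp against kInternalizedTag|kStringTag.
      return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
    }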
@@ -4697,17 +4433,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
__ j(equal, &done);
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels
- __ JumpIfNotSmi(ecx, &miss);
- if (FLAG_debug_code) {
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(ecx, Immediate(terminal_kind_sentinel));
- __ Assert(less_equal, "Array function sentinel is not an ElementsKind");
- }
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then the cell holds either some other function or an
+ // AllocationSite. Do a map check on the object in ecx.
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &miss);
// Load the global or builtins object from the current context
__ LoadGlobalContext(ecx);
@@ -4739,14 +4473,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
__ j(not_equal, &not_array_function);
- // The target function is the Array constructor, install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when its constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ mov(FieldOperand(ebx, Cell::kValueOffset),
- Immediate(initial_kind_sentinel));
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have one, and store it in the cell.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ __ push(eax);
+ __ push(edi);
+ __ push(ebx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ pop(ebx);
+ __ pop(edi);
+ __ pop(eax);
+ }
__ jmp(&done);
__ bind(&not_array_function);
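Taken together, the call-target cell is now a small state machine: the uninitialized sentinel, a concrete JSFunction, an AllocationSite (for the Array function only), or the megamorphic sentinel. In rough C++ pseudocode, the logic the assembly above implements (names illustrative, not the actual runtime API):

    // Sketch of GenerateRecordCallTarget's decision tree.
    void RecordCallTarget(Cell* cell, Object* target, Isolate* isolate) {
      Object* value = cell->value();
      if (value == target) return;                        // still monomorphic
      if (value == MegamorphicSentinel(isolate)) return;  // nothing to learn
      if (value->IsAllocationSite() && IsArrayFunction(target)) return;
      if (value == UninitializedSentinel(isolate)) {
        if (IsArrayFunction(target)) {
          // Array calls record an AllocationSite so the initial ElementsKind
          // can be tracked (via CreateAllocationSiteStub above).
          cell->set_value(CreateAllocationSite(isolate));
        } else {
          cell->set_value(target);                        // go monomorphic
        }
      } else {
        cell->set_value(MegamorphicSentinel(isolate));    // miss: megamorphic
      }
    }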
@@ -4912,6 +4654,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@@ -5633,7 +5376,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
- if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+ // Otherwise, at least one of the arguments is definitely a string,
+ // and we convert the one that is not known to be a string.
+ if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
@@ -5642,20 +5389,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(edx, &call_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
+ } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+ ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
}
// Both arguments are strings.
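The flag tests above assume the new StringAddFlags encoding in which CHECK_BOTH is simply the union of CHECK_LEFT and CHECK_RIGHT, replacing the old inverted NO_STRING_CHECK_* scheme deleted from code-stubs-ia32.h below. A sketch of the presumed enum (values illustrative):

    // Encoding implied by the (flags_ & STRING_ADD_CHECK_BOTH) tests above.
    enum StringAddFlags {
      STRING_ADD_CHECK_NONE = 0,         // both operands known to be strings
      STRING_ADD_CHECK_LEFT = 1 << 0,    // check/convert the left operand
      STRING_ADD_CHECK_RIGHT = 1 << 1,   // check/convert the right operand
      STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
      STRING_ADD_ERECT_FRAME = 1 << 2    // build a frame before runtime calls
    };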
@@ -5941,7 +5684,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Drop(2);
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, ecx);
// Build a frame
{
@@ -5956,7 +5699,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & ERECT_FRAME) != 0) {
+ if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm, ecx);
// Build a frame
{
@@ -6009,7 +5752,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch1,
scratch2,
scratch3,
- false,
&not_cached);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
@@ -6859,14 +6601,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp1, kInternalizedTag | kStringTag);
- __ j(not_equal, &miss, Label::kNear);
-
- __ and_(tmp2, Immediate(kIsNotStringMask | kIsInternalizedMask));
- __ cmpb(tmp2, kInternalizedTag | kStringTag);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ __ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
@@ -6905,7 +6643,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
@@ -6981,10 +6718,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// also know they are both strings.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
+ STATIC_ASSERT(kInternalizedTag == 0);
+ __ or_(tmp1, tmp2);
+ __ test(tmp1, Immediate(kIsNotInternalizedMask));
+ __ j(not_zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
@@ -7789,18 +7526,20 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ j(zero, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry
+ // Fix kind and retry (only if we have an allocation site in the cell).
__ inc(edx);
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &normal_sequence);
-
- // The type cell may have gone megamorphic, don't overwrite if so
- __ mov(ecx, FieldOperand(ebx, kPointerSize));
- __ JumpIfNotSmi(ecx, &normal_sequence);
+ __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
+ Handle<Map> allocation_site_map(
+ masm->isolate()->heap()->allocation_site_map(),
+ masm->isolate());
+ __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
+ __ j(not_equal, &normal_sequence);
// Save the resulting elements kind in type info
__ SmiTag(edx);
- __ mov(FieldOperand(ebx, kPointerSize), edx);
+ __ mov(FieldOperand(ecx, AllocationSite::kTransitionInfoOffset), edx);
__ SmiUntag(edx);
__ bind(&normal_sequence);
@@ -7829,7 +7568,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
- if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+ if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -7901,7 +7640,17 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ JumpIfNotSmi(edx, &no_info);
+
+ // The type cell's value may be undefined.
+ __ cmp(edx, Immediate(undefined_sentinel));
+ __ j(equal, &no_info);
+
+ // The type cell has either an AllocationSite or a JSFunction
+ __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
+ masm->isolate()->heap()->allocation_site_map())));
+ __ j(not_equal, &no_info);
+
+ __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(edx);
__ jmp(&switch_ready);
__ bind(&no_info);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 6dc63bdd4..e80acc6cc 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -86,80 +86,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // function instead of switch more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance undo_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near = Label::kFar);
- void GenerateSmiCodeUndo(MacroAssembler* masm);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -218,20 +144,6 @@ class StringHelper : public AllStatic {
};
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 1 << 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
- // Stub needs a frame before calling the runtime
- ERECT_FRAME = 1 << 3,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -319,7 +231,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register result,
Register scratch1,
Register scratch2,
- bool object_is_smi,
Label* not_found);
private:
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index da32c504f..f488718dc 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -651,7 +651,7 @@ OS::MemMoveFunction CreateMemMoveFunction() {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
+ Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -660,9 +660,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- esp[0] : return address
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, allocation_site_info_found);
+ ASSERT(allocation_memento_found != NULL);
+ __ TestJSArrayForAllocationMemento(edx, edi);
+ __ j(equal, allocation_memento_found);
}
// Set transitioned map.
@@ -689,7 +689,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ TestJSArrayForAllocationMemento(edx, edi);
__ j(equal, fail);
}
@@ -828,7 +828,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label loop, entry, convert_hole, gc_required, only_change_map, success;
if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ TestJSArrayForAllocationMemento(edx, edi);
__ j(equal, fail);
}
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index db1d5a612..68199f905 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -91,6 +91,7 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
+
// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 6af2445f4..505cd4fc1 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -566,15 +566,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
- if (type() == EAGER || type() == SOFT) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
__ sub(edx, ebp);
__ neg(edx);
@@ -620,12 +616,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER || type() == SOFT) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
+ // Remove the bailout id, return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
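With the EAGER/SOFT special case removed, every deoptimization entry now pushes a return address, so the offsets past the saved-registers area are uniform across deopt types. A small sketch of the fp-to-sp delta computation the code above performs (ia32 offsets; illustrative):

    #include <stdint.h>

    const intptr_t kPointerSize = 4;  // ia32

    // [esp + saved_area]                  : bailout id         (-> ebx)
    // [esp + saved_area + kPointerSize]   : return address     (-> ecx)
    // [esp + saved_area + 2*kPointerSize] : caller's frame top (-> edx)
    intptr_t FpToSpDelta(intptr_t ebp, intptr_t esp, intptr_t saved_area) {
      intptr_t edx = esp + saved_area + 2 * kPointerSize;  // lea edx, ...
      return ebp - edx;  // matches "sub edx, ebp; neg edx" above
    }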
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 14e580069..c43f11c00 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -575,6 +575,7 @@ int DisassemblerIA32::F7Instruction(byte* data) {
}
}
+
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index cf3132d33..66a7c1c08 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -107,6 +107,7 @@ class JumpPatchSite BASE_EMBEDDED {
// formal parameter count expected by the function.
//
// The live registers are:
+// o ecx: CallKind
// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
@@ -3684,7 +3685,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(eax);
}
@@ -4363,10 +4364,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
+ UnaryOpStub stub(expr->op());
// UnaryOpStub expects the argument to be in the
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index eb6ccd90e..1e9146847 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -319,9 +319,10 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// Is the string internalized? We already know it's a string so a single
// bit test is enough.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
- __ j(zero, not_unique);
+ STATIC_ASSERT(kNotInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+ kIsNotInternalizedMask);
+ __ j(not_zero, not_unique);
__ bind(&unique);
}
@@ -798,8 +799,8 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -811,7 +812,7 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -827,7 +828,7 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1421,8 +1422,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -- esp[0] : return address
// -----------------------------------
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::STUB, MONOMORPHIC, strict_mode,
+ Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
no_reg);
@@ -1598,8 +1600,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
@@ -1626,8 +1628,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
+ AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index defae1c16..2c234d834 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -353,7 +353,6 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
- ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(x87_stack_depth_);
}
@@ -365,8 +364,7 @@ bool LCodeGen::GenerateBody() {
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -382,56 +380,32 @@ bool LCodeGen::GenerateJumpTable() {
}
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+      // Push the continuation which was stashed where the ebp should
+      // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
return !is_aborted();
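For orientation, the stack that the unified needs_frame path above builds can be reconstructed from the push/mov/lea offsets (a sketch, assuming 4-byte pointers):

    // After `call &push_approx_pc`, lowest address first:
    //   esp + 0   approximate pc (pushed by the call itself)
    //   esp + 4   Smi(StackFrame::STUB) marker
    //   esp + 8   caller's context
    //   esp + 12  deopt entry address (the continuation)
    // push MemOperand(esp, 3 * kPointerSize) duplicates the continuation at
    // the new esp + 0; mov MemOperand(esp, 4 * kPointerSize), ebp overwrites
    // the old continuation slot with the saved ebp; lea then points ebp at
    // that slot. ret(0) pops the duplicated continuation and jumps to it,
    // leaving a well-formed stub frame behind ebp.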
@@ -501,68 +475,181 @@ Register LCodeGen::ToRegister(int index) const {
}
+X87Register LCodeGen::ToX87Register(int index) const {
+ return X87Register::FromAllocationIndex(index);
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
- return op->IsDoubleRegister();
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+ ASSERT(X87StackContains(reg));
+ X87Fxch(reg);
+ x87_stack_depth_--;
+}
+
+
+void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
+ ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
+ int i = X87ArrayIndex(reg);
+ int st = x87_st2idx(i);
+ if (st != other_slot) {
+ int other_i = x87_st2idx(other_slot);
+ X87Register other = x87_stack_[other_i];
+ x87_stack_[other_i] = reg;
+ x87_stack_[i] = other;
+ if (st == 0) {
+ __ fxch(other_slot);
+ } else if (other_slot == 0) {
+ __ fxch(st);
+ } else {
+ __ fxch(st);
+ __ fxch(other_slot);
+ __ fxch(st);
+ }
+ }
}
-void LCodeGen::ReadX87Operand(Operand dst) {
- ASSERT(x87_stack_depth_ == 1);
- __ fst_d(dst);
+int LCodeGen::x87_st2idx(int pos) {
+ return x87_stack_depth_ - pos - 1;
}
-void LCodeGen::PushX87DoubleOperand(Operand src) {
- ASSERT(x87_stack_depth_ == 0);
- x87_stack_depth_++;
- __ fld_d(src);
+int LCodeGen::X87ArrayIndex(X87Register reg) {
+ for (int i = 0; i < x87_stack_depth_; i++) {
+ if (x87_stack_[i].is(reg)) return i;
+ }
+ UNREACHABLE();
+ return -1;
}
-void LCodeGen::PushX87FloatOperand(Operand src) {
- ASSERT(x87_stack_depth_ == 0);
- x87_stack_depth_++;
- __ fld_s(src);
+bool LCodeGen::X87StackContains(X87Register reg) {
+ for (int i = 0; i < x87_stack_depth_; i++) {
+ if (x87_stack_[i].is(reg)) return true;
+ }
+ return false;
}
-void LCodeGen::PopX87() {
- ASSERT(x87_stack_depth_ == 1);
+void LCodeGen::X87Free(X87Register reg) {
+ ASSERT(X87StackContains(reg));
+ int i = X87ArrayIndex(reg);
+ int st = x87_st2idx(i);
+ if (st > 0) {
+    // Keep track of how fstp(st) changes the order of elements.
+ int tos_i = x87_st2idx(0);
+ x87_stack_[i] = x87_stack_[tos_i];
+ }
x87_stack_depth_--;
- __ fstp(0);
+ __ fstp(st);
}
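The helpers above maintain x87_stack_ so that the last live array element models st(0); x87_st2idx converts between the two numberings, and X87Free records how fstp(st) moves the old top into the freed slot. A self-contained sketch of that bookkeeping, with plain ints standing in for X87Register:

    #include <vector>

    struct X87StackModel {
      std::vector<int> regs;  // regs.back() plays the role of st(0)

      // Array index i corresponds to physical slot st = depth - i - 1,
      // exactly the x87_st2idx mapping.
      int StackSlot(int i) const {
        return static_cast<int>(regs.size()) - i - 1;
      }

      // fstp(st) pops st(0) into slot st, so the register that was on top
      // now occupies array index i; record that before shrinking.
      void Free(int i) {
        int st = StackSlot(i);
        if (st > 0) regs[i] = regs.back();
        regs.pop_back();
      }
    };

Freeing the bottom element of a three-deep stack, for example, leaves the former top at index 0, which is what the x87_stack_[i] = x87_stack_[tos_i] assignment records.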
-void LCodeGen::CurrentInstructionReturnsX87Result() {
- ASSERT(x87_stack_depth_ <= 1);
- if (x87_stack_depth_ == 0) {
- x87_stack_depth_ = 1;
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+ if (X87StackContains(dst)) {
+ X87Fxch(dst);
+ __ fstp(0);
+ } else {
+ ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+ x87_stack_[x87_stack_depth_] = dst;
+ x87_stack_depth_++;
}
+ X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+ if (opts == kX87DoubleOperand) {
+ __ fld_d(src);
+ } else if (opts == kX87FloatOperand) {
+ __ fld_s(src);
+ } else if (opts == kX87IntOperand) {
+ __ fild_s(src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src) {
+ X87Fxch(src);
+ __ fst_d(dst);
+}
+
+
+void LCodeGen::X87PrepareToWrite(X87Register reg) {
+ if (X87StackContains(reg)) {
+ X87Free(reg);
+ }
+  // Mark this register as the next register to write to.
+ x87_stack_[x87_stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87CommitWrite(X87Register reg) {
+ // Assert the reg is prepared to write, but not on the virtual stack yet
+ ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
+ x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
+ x87_stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result) {
+ // You need to use DefineSameAsFirst for x87 instructions
+ ASSERT(result.is(left));
+ X87Fxch(right, 1);
+ X87Fxch(left);
}
void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
- if (x87_stack_depth_ > 0) {
- if ((instr->ClobbersDoubleRegisters() ||
- instr->HasDoubleRegisterResult()) &&
- !instr->HasDoubleRegisterInput()) {
- PopX87();
+ if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+ bool double_inputs = instr->HasDoubleRegisterInput();
+
+    // Flush the stack from tos down, since X87Free() will mess with tos.
+ for (int i = x87_stack_depth_-1; i >= 0; i--) {
+ X87Register reg = x87_stack_[i];
+ // Skip registers which contain the inputs for the next instruction
+ // when flushing the stack
+ if (double_inputs && instr->IsDoubleInput(reg, this)) {
+ continue;
+ }
+ X87Free(reg);
+ if (i < x87_stack_depth_-1) i++;
+ }
+ }
+ if (instr->IsReturn()) {
+ while (x87_stack_depth_ > 0) {
+ __ fstp(0);
+ x87_stack_depth_--;
}
}
}
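The downward walk in FlushX87StackIfNecessary has one subtlety: freeing a non-top slot moves the old top register into that index, so the loop must look at the index once more (the i++ compensation). In terms of the X87StackModel sketched above, with the inputs list standing in for instr->IsDoubleInput:

    #include <algorithm>
    #include <vector>

    void Flush(X87StackModel* s, const std::vector<int>& inputs) {
      for (int i = static_cast<int>(s->regs.size()) - 1; i >= 0; i--) {
        // Keep registers that the next instruction still reads.
        if (std::find(inputs.begin(), inputs.end(), s->regs[i]) !=
            inputs.end()) {
          continue;
        }
        s->Free(i);
        // Free() may have moved the old top into index i; the i++ here
        // cancels the loop's i-- so that index is examined again.
        if (i < static_cast<int>(s->regs.size()) - 1) i++;
      }
    }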
+void LCodeGen::EmitFlushX87ForDeopt() {
+ for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
}
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+ ASSERT(op->IsDoubleRegister());
+ return ToX87Register(op->index());
+}
+
+
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
@@ -835,8 +922,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Deoptimizer::BailoutType bailout_type) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
- // It's an error to deoptimize with the x87 fp stack in use.
- ASSERT(x87_stack_depth_ == 0);
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Address entry =
@@ -847,49 +932,47 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
- Handle<SharedFunctionInfo> shared(info()->shared_info());
+ ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfd();
__ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax,
- FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(eax, Operand::StaticVariable(count));
+ __ sub(eax, Immediate(1));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
+ ASSERT(frame_is_built_);
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
+ __ mov(Operand::StaticVariable(count), eax);
__ pop(eax);
__ popfd();
}
+  // Before instructions that can deopt, we normally flush the x87 stack. But
+  // the current instruction's inputs or outputs can still be on the stack, so
+  // flush them from the physical stack here to leave it in a consistent
+  // state.
+ if (x87_stack_depth_ > 0) {
+ Label done;
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+ EmitFlushX87ForDeopt();
+ __ bind(&done);
+ }
+
if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
+ if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
__ bind(&done);
}
ASSERT(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- if (bailout_type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@@ -1139,11 +1222,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1721,11 +1799,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
- __ push(Immediate(lower));
__ push(Immediate(upper));
- PushX87DoubleOperand(Operand(esp, 0));
+ __ push(Immediate(lower));
+ X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
- CurrentInstructionReturnsX87Result();
} else {
CpuFeatureScope scope1(masm(), SSE2);
ASSERT(instr->result()->IsDoubleRegister());
@@ -1990,48 +2067,63 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a mulsd depending on the result
- __ movaps(left, left);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // Modulo uses a fixed result register.
+ ASSERT(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+        // Don't delete this mov. It may improve performance on some CPUs
+        // when there is a mulsd depending on the result.
+ __ movaps(left, left);
+ break;
+ case Token::MOD: {
+ // Pass two doubles as arguments on the stack.
+ __ PrepareCallCFunction(4, eax);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), left);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(Token::MOD, isolate()),
+ 4);
+
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(result, Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ X87Register left = ToX87Register(instr->left());
+ X87Register right = ToX87Register(instr->right());
+ X87Register result = ToX87Register(instr->result());
+ X87PrepareBinaryOp(left, right, result);
+ switch (instr->op()) {
+ case Token::MUL:
+ __ fmul_i(1);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
}
}
@@ -2058,12 +2150,12 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
- int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
+ int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
- if (right_block == left_block) {
+ if (right_block == left_block || cc == no_condition) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@@ -2076,6 +2168,25 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
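EmitBranch materializes at most one jump by falling through to whichever target block the scheduler emits next; the new cc == no_condition case degenerates to a plain goto. A sketch of the selection logic, with ints standing in for block ids:

    enum BranchLayout { kGotoOnly, kJumpToFalse, kJumpToTrue, kJumpToBoth };

    BranchLayout SelectBranchLayout(int true_block, int false_block,
                                    int next_block, bool unconditional) {
      if (true_block == false_block || unconditional) return kGotoOnly;
      if (true_block == next_block) return kJumpToFalse;  // j(NegateCondition(cc))
      if (false_block == next_block) return kJumpToTrue;  // j(cc)
      return kJumpToBoth;                                 // j(cc), then jmp
    }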
+void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsSmiOrInteger32() || r.IsDouble()) {
+ EmitBranch(instr, no_condition);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->value());
+ HType type = instr->hydrogen()->value()->type();
+ if (type.IsTaggedNumber()) {
+ EmitBranch(instr, no_condition);
+ }
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+ __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ EmitBranch(instr, equal);
+ }
+}
+
+
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2256,7 +2367,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -2868,6 +2979,20 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
+void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ ExternalReference sites_list_address = instr->GetReference(isolate());
+
+ __ mov(temp, Immediate(sites_list_address));
+ __ mov(temp, Operand(temp, 0));
+ __ mov(FieldOperand(object, instr->hydrogen()->store_field().offset()),
+ temp);
+ __ mov(temp, Immediate(sites_list_address));
+ __ mov(Operand(temp, 0), object);
+}
+
+
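DoLinkObjectInList is a list prepend through a global head pointer: load the current head, store it into the object's link field, then store the object as the new head. A sketch with plain pointers, where Site and next_field are illustrative names and the real link offset comes from hydrogen()->store_field():

    struct Site { Site* next_field; };

    void LinkInList(Site** list_head, Site* object) {
      object->next_field = *list_head;  // mov temp, [head]; mov [obj+off], temp
      *list_head = object;              // mov [head], object
    }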
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2934,8 +3059,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, FieldOperand(object, offset));
} else {
- PushX87DoubleOperand(FieldOperand(object, offset));
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
return;
}
@@ -3180,16 +3304,14 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movss(result, operand);
__ cvtss2sd(result, result);
} else {
- PushX87FloatOperand(operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
- PushX87DoubleOperand(operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), operand);
}
} else {
Register result(ToRegister(instr->result()));
@@ -3260,8 +3382,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, double_load_operand);
} else {
- PushX87DoubleOperand(double_load_operand);
- CurrentInstructionReturnsX87Result();
+ X87Mov(ToX87Register(instr->result()), double_load_operand);
}
}
@@ -3777,6 +3898,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
}
+
void LCodeGen::DoMathRound(LMathRound* instr) {
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
@@ -4168,7 +4290,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(ebx, instr->hydrogen()->property_cell());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
- (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4254,7 +4376,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(object, offset), value);
} else {
- __ fstp_d(FieldOperand(object, offset));
+ X87Register value = ToX87Register(instr->value());
+ X87Mov(FieldOperand(object, offset), value);
}
return;
}
@@ -4380,7 +4503,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
- __ fst_d(operand);
+ X87Mov(operand, ToX87Register(instr->value()));
}
} else {
Register value = ToRegister(instr->value());
@@ -4462,7 +4585,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ mov(double_store_operand2, Immediate(upper));
} else {
Label no_special_nan_handling;
- ASSERT(x87_stack_depth_ > 0);
+ X87Register value = ToX87Register(instr->value());
+ X87Fxch(value);
if (instr->NeedsCanonicalization()) {
__ fld(0);
@@ -4561,7 +4685,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
+ __ TestJSArrayForAllocationMemento(object, temp);
DeoptimizeIf(equal, instr->environment());
}
@@ -4744,7 +4868,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4932,10 +5056,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
convert_hole = load->UsesMustHandleHole();
}
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (!use_sse2) {
+    // Put the value on top of the stack.
+ X87Register src = ToX87Register(instr->value());
+ X87LoadForUsage(src);
+ }
+
Label no_special_nan_handling;
Label done;
if (convert_hole) {
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -4989,12 +5119,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- if (CpuFeatures::IsSupported(SSE2)) {
+ if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
- __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}
@@ -5045,12 +5175,14 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
+ X87Register res_reg,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
+ X87PrepareToWrite(res_reg);
STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
NUMBER_CANDIDATE_IS_ANY_TAGGED);
if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -5111,6 +5243,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
__ pop(input_reg);
__ SmiTag(input_reg); // Retag smi.
__ bind(&done);
+ X87CommitWrite(res_reg);
}
@@ -5492,11 +5625,11 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
} else {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
+ ToX87Register(instr->result()),
instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
- CurrentInstructionReturnsX87Result();
}
}
@@ -5511,93 +5644,22 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
+ __ cvttsd2si(result_reg, Operand(input_reg));
+
if (instr->truncating()) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
+ Label fast_case_succeeded;
__ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input in out of range.
- CpuFeatureScope scope(masm(), SSE3);
- Label convert, done;
- __ j(not_equal, &done, Label::kNear);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
- } else {
- Label done;
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done, Label::kNear);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
- }
+ __ j(not_equal, &fast_case_succeeded);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movdbl(MemOperand(esp, 0), input_reg);
+ DoubleToIStub stub(esp, result_reg, 0, true);
+ __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ __ add(esp, Immediate(kDoubleSize));
+ __ bind(&fast_case_succeeded);
} else {
Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
__ cvtsi2sd(xmm0, Operand(result_reg));
__ ucomisd(xmm0, input_reg);
DeoptimizeIf(not_equal, instr->environment());
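The rewritten truncating path leans on an x86 detail: cvttsd2si writes 0x80000000 (the "integer indefinite" value) whenever the input is NaN or out of int32 range, so comparing against that sentinel detects the rare case that needs the DoubleToIStub call. A hedged C++ sketch of the overall semantics; the real stub does bit-level exponent work, this only shows the fast/slow split:

    #include <cmath>
    #include <cstdint>

    // Simplified JS-style ToInt32 fallback: truncate, then wrap mod 2^32.
    int32_t SlowTruncate(double v) {
      if (std::isnan(v) || std::isinf(v)) return 0;
      double m = std::fmod(std::trunc(v), 4294967296.0);
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int32_t TruncateDoubleToI(double v) {
      // Fast path: in-range doubles convert directly (cvttsd2si in the
      // generated code, which signals a miss via the 0x80000000 sentinel).
      if (v > -2147483649.0 && v < 2147483648.0) return static_cast<int32_t>(v);
      return SlowTruncate(v);
    }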
@@ -5946,95 +6008,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -6346,24 +6319,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -6444,20 +6399,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index d05da8a08..eb75225b9 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -105,7 +105,7 @@ class LCodeGen BASE_EMBEDDED {
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsX87TopOfStack(LOperand* op) const;
+ X87Register ToX87Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
@@ -115,16 +115,23 @@ class LCodeGen BASE_EMBEDDED {
Immediate ToSmiImmediate(LOperand* op) const {
return Immediate(Smi::FromInt(ToInteger32(LConstantOperand::cast(op))));
}
+ double ToDouble(LConstantOperand* op) const;
// Support for non-sse2 (x87) floating point stack handling.
- // These functions maintain the depth of the stack (either 0 or 1)
- void PushX87DoubleOperand(Operand src);
- void PushX87FloatOperand(Operand src);
- void ReadX87Operand(Operand dst);
- bool X87StackNonEmpty() const { return x87_stack_depth_ > 0; }
- void PopX87();
- void CurrentInstructionReturnsX87Result();
- void FlushX87StackIfNecessary(LInstruction* instr);
+ // These functions maintain the mapping of physical stack registers to our
+ // virtual registers between instructions.
+ enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+ void X87Mov(X87Register reg, Operand src,
+ X87OperandType operand = kX87DoubleOperand);
+ void X87Mov(Operand src, X87Register reg);
+
+ void X87PrepareBinaryOp(
+ X87Register left, X87Register right, X87Register result);
+
+ void X87LoadForUsage(X87Register reg);
+ void X87PrepareToWrite(X87Register reg);
+ void X87CommitWrite(X87Register reg);
Handle<Object> ToHandle(LConstantOperand* op) const;
@@ -156,7 +163,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
@@ -291,9 +297,9 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ X87Register ToX87Register(int index) const;
int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
@@ -331,6 +337,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitNumberUntagDNoSSE2(
Register input,
Register temp,
+ X87Register res_reg,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
@@ -392,6 +399,16 @@ class LCodeGen BASE_EMBEDDED {
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
+ void X87Fxch(X87Register reg, int other_slot = 0);
+ void X87Fld(Operand src, X87OperandType opts);
+ void X87Free(X87Register reg);
+
+ void FlushX87StackIfNecessary(LInstruction* instr);
+ void EmitFlushX87ForDeopt();
+ bool X87StackContains(X87Register reg);
+ int X87ArrayIndex(X87Register reg);
+ int x87_st2idx(int pos);
+
Zone* zone_;
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
@@ -413,6 +430,7 @@ class LCodeGen BASE_EMBEDDED {
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
+ X87Register x87_stack_[X87Register::kNumAllocatableRegisters];
int x87_stack_depth_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 86bfe2fbf..e884a9dbc 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -313,6 +313,29 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
+ } else if (destination->IsDoubleRegister()) {
+ double v = cgen_->ToDouble(constant_source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ __ movdbl(dst, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
+ } else {
+ __ push(Immediate(upper));
+ __ push(Immediate(lower));
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
@@ -342,10 +365,10 @@ void LGapResolver::EmitMove(int index) {
} else {
// load from the register onto the stack, store in destination, which must
// be a double stack slot in the non-SSE2 case.
- ASSERT(source->index() == 0); // source is on top of the stack
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- cgen_->ReadX87Operand(dst);
+ X87Register src = cgen_->ToX87Register(source);
+ cgen_->X87Mov(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
if (CpuFeatures::IsSupported(SSE2)) {
@@ -378,10 +401,8 @@ void LGapResolver::EmitMove(int index) {
__ mov(dst1, tmp);
} else {
Operand src = cgen_->ToOperand(source);
- if (cgen_->X87StackNonEmpty()) {
- cgen_->PopX87();
- }
- cgen_->PushX87DoubleOperand(src);
+ X87Register dst = cgen_->ToX87Register(destination);
+ cgen_->X87Mov(dst, src);
}
}
} else {
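The constant path added to EmitMove above materializes a double by bit-casting it to a uint64 and pushing the two 32-bit halves, upper first so the lower half lands at the lower address for movdbl/X87Mov; all-zero bits (+0.0) short-circuit to xorps. A sketch of the split:

    #include <cstdint>
    #include <cstring>

    void SplitDouble(double v, int32_t* lower, int32_t* upper) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));        // BitCast<uint64_t, double>
      *lower = static_cast<int32_t>(bits);         // ends up at esp + 0
      *upper = static_cast<int32_t>(bits >> 32);   // pushed first, esp + 4
    }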
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 8231c4e8b..aebe26b78 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -82,6 +82,17 @@ bool LInstruction::HasDoubleRegisterInput() {
}
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op != NULL && op->IsDoubleRegister()) {
+ if (cgen->ToX87Register(op).is(reg)) return true;
+ }
+ }
+ return false;
+}
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -201,7 +212,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@@ -291,6 +302,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+ switch (hydrogen()->known_list()) {
+ case HLinkObjectInList::ALLOCATION_SITE_LIST:
+ return ExternalReference::allocation_sites_list_address(isolate);
+ }
+
+ UNREACHABLE();
+ // Return a dummy value
+ return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -350,7 +379,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add(" ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@@ -495,12 +523,6 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
}
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@@ -511,11 +533,6 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
}
-LOperand* LChunkBuilder::UseX87TopOfStack(HValue* value) {
- return Use(value, ToUnallocated(x87tos));
-}
-
-
LOperand* LChunkBuilder::UseRegister(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -643,13 +660,6 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
}
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, ToUnallocated(x87tos));
-}
-
-
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@@ -1695,8 +1705,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+ HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1704,7 +1714,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
@@ -1718,7 +1728,7 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
- return new(zone()) LCmpIDAndBranch(left, right);
+ return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@@ -1928,11 +1938,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
? TempRegister()
: NULL;
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- return AssignEnvironment(DefineX87TOS(res));
- }
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1967,9 +1973,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
- LOperand* value = CpuFeatures::IsSupported(SSE2)
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
// Make sure that temp and result_temp are different registers.
@@ -2038,6 +2042,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+ return new(zone())
+ LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2119,12 +2135,8 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- LOperand* temp = value_is_zero ? NULL : TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else {
- return DefineX87TOS(new(zone()) LConstantD(NULL));
- }
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LConstantD(temp));
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -2168,6 +2180,14 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+ LOperand* object = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LLinkObjectInList* result = new(zone()) LLinkObjectInList(object, temp);
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2316,11 +2336,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
LOperand* object = UseRegisterAtStart(instr->elements());
LOperand* val = NULL;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- val = UseRegisterAtStart(instr->value());
- } else if (!instr->IsConstantHoleStore()) {
- val = UseX87TopOfStack(instr->value());
- }
+ val = UseRegisterAtStart(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyed(object, key, val);
} else {
@@ -2450,11 +2466,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseTempRegister(instr->value());
} else if (FLAG_track_double_fields &&
instr->field_representation().IsDouble()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
- val = UseRegisterAtStart(instr->value());
- } else {
- val = UseX87TopOfStack(instr->value());
- }
+ val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2524,15 +2536,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
@@ -2559,15 +2562,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2748,15 +2742,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(context, key, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->enumerable(), eax);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index e48e881eb..a938ee56b 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -44,7 +44,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -75,7 +74,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
- V(CmpIDAndBranch) \
+ V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@@ -87,7 +86,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
- V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@@ -101,7 +99,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
- V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@@ -114,9 +111,11 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
+ V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
+ V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@@ -265,7 +264,11 @@ class LInstruction: public ZoneObject {
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
virtual bool ClobbersDoubleRegisters() const {
- return is_call_ || !CpuFeatures::IsSupported(SSE2);
+ return is_call_ ||
+ (!CpuFeatures::IsSupported(SSE2) &&
+         // We only have rudimentary x87 stack tracking, so in general we
+         // cannot handle deoptimization or phi nodes.
+ (HasEnvironment() || IsControl()));
}
virtual bool HasResult() const = 0;
@@ -273,6 +276,7 @@ class LInstruction: public ZoneObject {
bool HasDoubleRegisterResult();
bool HasDoubleRegisterInput();
+ bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -377,7 +381,6 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool ClobbersDoubleRegisters() const { return false; }
virtual bool HasInterestingComment(LCodeGen* gen) const {
return !IsRedundant();
@@ -676,9 +679,9 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@@ -686,8 +689,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+ "compare-numeric-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -883,6 +887,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+ explicit LIsNumberAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1196,10 +1213,6 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
- virtual bool ClobbersDoubleRegisters() const {
- return false;
- }
-
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1694,6 +1707,25 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
};
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 1> {
+ public:
+ explicit LLinkObjectInList(LOperand* object, LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ ExternalReference GetReference(Isolate* isolate);
+
+ DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+ DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -2180,9 +2212,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- virtual bool ClobbersDoubleRegisters() const {
- return false;
- }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
@@ -2561,21 +2591,6 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
class LAllocate: public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2663,22 +2678,6 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
-class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
- public:
- LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@@ -2706,22 +2705,6 @@ class LStackCheck: public LTemplateInstruction<0, 1, 0> {
};
-class LIn: public LTemplateInstruction<1, 3, 0> {
- public:
- LIn(LOperand* context, LOperand* key, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = key;
- inputs_[2] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* object() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
@@ -2858,14 +2841,13 @@ class LChunkBuilder BASE_EMBEDDED {
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
- LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
+ LUnallocated* ToUnallocated(X87Register reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
XMMRegister fixed_register);
- MUST_USE_RESULT LOperand* UseX87TopOfStack(HValue* value);
// A value that is guaranteed to be allocated to a register.
// Operand created by UseRegister is guaranteed to be live until the end of
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index a9a0268ae..ef90c10df 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -842,6 +842,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
LeaveExitFrameEpilogue();
}
+
void MacroAssembler::LeaveExitFrameEpilogue() {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
@@ -2811,11 +2812,14 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
void MacroAssembler::JumpIfNotUniqueName(Operand operand,
Label* not_unique_name,
Label::Distance distance) {
- STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
- cmp(operand, Immediate(kInternalizedTag));
- j(less, not_unique_name, distance);
- cmp(operand, Immediate(SYMBOL_TYPE));
- j(greater, not_unique_name, distance);
+ STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+ Label succeed;
+ test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+ j(zero, &succeed);
+ cmpb(operand, static_cast<uint8_t>(SYMBOL_TYPE));
+ j(not_equal, not_unique_name, distance);
+
+ bind(&succeed);
}
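
The rewritten check branches to not_unique_name unless the operand's instance type is either an internalized string or SYMBOL_TYPE. A minimal C++ sketch of the predicate the new assembly encodes, valid under the bit layout asserted by the STATIC_ASSERT (kInternalizedTag == 0 and kStringTag == 0):

    // Sketch only: true iff `instance_type` denotes a unique name.
    // An internalized string has neither the kIsNotStringMask nor the
    // kIsNotInternalizedMask bit set, so the masked test is zero for it.
    static bool IsUniqueName(uint32_t instance_type) {
      if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0) {
        return true;  // internalized string: the `succeed` fast path
      }
      return instance_type == SYMBOL_TYPE;  // otherwise only symbols pass
    }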
@@ -3162,10 +3166,10 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
}
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
- Label no_info_available;
+ Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
@@ -3173,14 +3177,14 @@ void MacroAssembler::TestJSArrayForAllocationSiteInfo(
ExternalReference::new_space_allocation_top_address(isolate());
lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_info_available);
+ j(less, &no_memento_available);
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_info_available);
- cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
+ j(greater, &no_memento_available);
+ cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
+ Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ bind(&no_memento_available);
}
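
Apart from the rename, the logic is unchanged: a memento, if one exists, was allocated immediately after the JSArray, so the helper bounds-checks that slot against new space and then compares its map word. A hedged C-style sketch of what the assembly computes:

    // Sketch: `end` points one past the would-be AllocationMemento.
    Address end = receiver - kHeapObjectTag + JSArray::kSize +
                  AllocationMemento::kSize;
    if (end < new_space_start || end > new_space_allocation_top) {
      // No memento available: the candidate slot is not in new space.
    } else {
      // Condition code `equal` iff the map word at the memento's start
      // (end - AllocationMemento::kSize) is the allocation_memento_map.
    }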
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 5cb8286ba..3bca930d6 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -905,14 +905,14 @@ class MacroAssembler: public Assembler {
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
+ // AllocationMemento support. Arrays may have an associated
+ // AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
- // If allocation info is present, conditional code is set to equal
+ // If an allocation memento is present, the condition code is set to equal.
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
+ void TestJSArrayForAllocationMemento(Register receiver_reg,
+ Register scratch_reg);
private:
bool generating_stub_;
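
A typical (hypothetical) call site branches on the equal condition the helper establishes; the registers here are illustrative only:

    // Hypothetical use inside generated code: edx holds the JSArray,
    // ecx is sacrificed as the scratch register.
    Label no_memento;
    __ TestJSArrayForAllocationMemento(edx, ecx);
    __ j(not_equal, &no_memento);
    // ... memento found: pretransition the array's elements kind ...
    __ bind(&no_memento);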
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index f478e574f..dfcc86956 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1030,6 +1030,7 @@ void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
__ bind(&after_position);
}
+
void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(register_location(register_index), Immediate(to));
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 28e043d64..2b391e0b3 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -779,87 +779,53 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Register unused,
- Label* miss_label,
- Label* miss_restore_name,
- Label* slow) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+ MacroAssembler* masm,
+ Handle<JSObject> holder,
+ Register holder_reg,
+ Handle<Name> name,
+ Label* miss) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss, holder_reg, name, scratch1(), scratch2());
+ }
+}
+
+
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register storage_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Register unused,
+ Label* miss_label,
+ Label* slow) {
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- // Ensure no transitions to deprecated maps are followed.
- __ CheckMapDeprecated(transition, scratch1, miss_label);
-
- // Check that we are allowed to write this.
- if (object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- // holder == object indicates that no property was found.
- if (lookup->holder() != *object) {
- holder = lookup->holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, push
- Register holder_reg = CheckPrototypes(
- object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
- // If no property was found, and the holder (the last object in the
- // prototype chain) is in slow mode, we need to do a negative lookup on the
- // holder.
- if (lookup->holder() == *object) {
- if (holder->IsJSGlobalObject()) {
- GenerateCheckPropertyCell(
- masm,
- Handle<GlobalObject>(GlobalObject::cast(holder)),
- name,
- scratch1,
- miss_restore_name);
- } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
- }
- }
- }
-
- Register storage_reg = name_reg;
-
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
__ cmp(value_reg, scratch1);
- __ j(not_equal, miss_restore_name);
+ __ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
- __ JumpIfNotSmi(value_reg, miss_restore_name);
+ __ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- __ JumpIfSmi(value_reg, miss_restore_name);
+ __ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
@@ -879,7 +845,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
- miss_restore_name, DONT_DO_SMI_CHECK);
+ miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
__ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
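
Taken together, the two hunks above show GenerateStoreTransition validating the incoming value against the target field's representation before anything is stored. In rough pseudo-C++ (flag and accessor names as in the diff):

    // Sketch of the representation guards; the miss path restores state.
    if (details.type() == CONSTANT_FUNCTION) {
      if (value != constant) goto miss;     // must equal the descriptor value
    } else if (FLAG_track_fields && representation.IsSmi()) {
      if (!value->IsSmi()) goto miss;       // Smi field rejects heap objects
    } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
      if (value->IsSmi()) goto miss;        // pointer field rejects Smis
    } else if (FLAG_track_double_fields && representation.IsDouble()) {
      // Allocate a HeapNumber in storage_reg, then copy the value's bits
      // into it, unboxing a Smi or loading from an existing HeapNumber.
    }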
@@ -959,15 +925,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
- name_reg,
+ storage_reg,
scratch1,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -986,15 +949,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (!FLAG_track_fields || !representation.IsSmi()) {
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
- __ mov(name_reg, value_reg);
- } else {
- ASSERT(storage_reg.is(name_reg));
+ __ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
- name_reg,
+ storage_reg,
receiver_reg,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@@ -1010,24 +970,15 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
- }
-
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1181,6 +1132,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ // Make sure that the type feedback oracle harvests the receiver map.
+ // TODO(svenpanne) Remove this hack when all ICs are reworked.
+ __ mov(scratch1, Handle<Map>(object->map()));
+
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1286,7 +1241,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ jmp(success);
@@ -1296,6 +1252,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+ Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ GenerateRestoreName(masm(), miss, name);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+ }
+}
+
+
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@@ -1351,7 +1318,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ j(not_equal, &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
return reg;
}
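
With this change the store stubs adopt the convention the load stubs already used: frontend checks jump to one shared miss label, and the footer emits the miss tail only if some check actually bound it. The store variant additionally restores the name register, which may have been clobbered as scratch, before tail-calling the miss builtin. The resulting code shape, sketched as comments:

    //   <map / prototype / cell checks>   -- may jump to `miss`
    //   jmp success                       -- fast path skips the miss tail
    // miss:
    //   <GenerateRestoreName>             -- put `name` back in its register
    //   <tail call MissBuiltin(kind())>
    // success:
    //   <the actual load or store body>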
@@ -1372,7 +1339,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- HandlerFrontendFooter(success, &miss);
+ HandlerFrontendFooter(name, success, &miss);
}
@@ -1724,11 +1691,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
- Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
- Handle<Cell> kind_feedback_cell =
- isolate()->factory()->NewCell(kind);
+ Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+ site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+ Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ mov(eax, Immediate(argc));
- __ mov(ebx, kind_feedback_cell);
+ __ mov(ebx, site_feedback_cell);
__ mov(edi, function);
ArrayConstructorStub stub(isolate());
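
The elements-kind feedback is no longer a bare Smi in the cell: it is wrapped in a freshly allocated AllocationSite, presumably so the stub can update the transition info in place later. Consolidating the factory calls from the hunk into one sketch:

    // Feedback wiring as added above (handlified factory calls).
    Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
    site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
    Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
    __ mov(ebx, site_feedback_cell);  // ebx: Cell holding the AllocationSite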
@@ -2899,19 +2866,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
+ Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
- Label miss, miss_restore_name;
- // Check that the maps haven't changed, preserving the value register.
- __ JumpIfSmi(receiver(), &miss);
- CheckPrototypes(object, receiver(), holder,
- scratch1(), this->name(), scratch2(),
- name, &miss_restore_name);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
__ pop(scratch1()); // remove the return address
__ push(receiver());
@@ -2925,13 +2886,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
- // Handle store cache miss.
- GenerateRestoreName(masm(), &miss_restore_name, name);
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::CALLBACKS, name);
+ return GetCode(kind(), Code::CALLBACKS, name);
}
@@ -2985,20 +2941,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), scratch2(), &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ pop(scratch1()); // remove the return address
__ push(receiver());
__ push(this->name());
@@ -3011,12 +2953,8 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
- return GetICCode(kind(), Code::INTERCEPTOR, name);
+ return GetCode(kind(), Code::INTERCEPTOR, name);
}
@@ -3220,7 +3158,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- HandlerFrontendFooter(&success, &miss);
+ HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@@ -3233,7 +3171,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,