author    Ben Noordhuis <info@bnoordhuis.nl>  2013-07-29 21:21:03 +0200
committer Ben Noordhuis <info@bnoordhuis.nl>  2013-07-29 21:21:03 +0200
commit    1bd711c8a09e327946f2eca5030e9710dc0e1e6e (patch)
tree      6233c588fca458165ad6e448c5d3fbaa1648f805 /deps/v8/src/ia32
parent    17fbd6cd66453da565d77ab557188eab479dab15 (diff)
download  node-1bd711c8a09e327946f2eca5030e9710dc0e1e6e.tar.gz
v8: upgrade to v8 3.20.9
Diffstat (limited to 'deps/v8/src/ia32')
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc            |  31
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc           |  46
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc          |   2
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc       | 203
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h        |  22
-rw-r--r--  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc  |  16
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc               |  76
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h                |  34
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc       |   4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc            |  16
10 files changed, 207 insertions(+), 243 deletions(-)
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 548cbaace..5789f4921 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -43,6 +43,16 @@ namespace v8 {
namespace internal {
+void ToNumberStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -300,27 +310,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
- __ ret(0);
-
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin, Label::kNear);
- __ ret(0);
-
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
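
Note on the code-stubs change: ToNumberStub stops being a hand-written platform stub and becomes a Hydrogen code stub, so ia32 keeps only the interface descriptor (one register parameter in eax, no deoptimization handler). The deleted Generate() above emitted the fast path sketched below; IsSmi, MapOf and CallToNumberBuiltin are hypothetical stand-ins, not V8 API.

    // Minimal sketch of the deleted fast path (illustrative only):
    Object* ToNumberFastPath(Object* arg, Factory* factory) {
      if (IsSmi(arg)) return arg;                                 // smis are numbers
      if (MapOf(arg) == *factory->heap_number_map()) return arg;  // heap numbers too
      return CallToNumberBuiltin(arg);  // everything else: Builtins::TO_NUMBER
    }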
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 505cd4fc1..48968064a 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -114,22 +114,8 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
}
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- DisallowHeapAllocation nha;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // Get the optimized code.
- Code* code = function->code();
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
-
- // The optimized code is going to be patched, so we cannot use it any more.
- function->shared()->EvictFromOptimizedCodeMap(code, "deoptimized function");
-
// We will overwrite the code's relocation info in-place. Relocation info
// is written backward. The relocation info is the payload of a byte
// array. Later on we will slide this to the start of the byte array and
@@ -188,25 +174,6 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
ASSERT(junk_address <= reloc_end_address);
isolate->heap()->CreateFillerObjectAt(junk_address,
reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
}
@@ -741,6 +708,17 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
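
Note on the deoptimizer change: the per-function bookkeeping (evicting the code from the optimized code map, linking it into the deoptimizing-code list, invalidating it for the collector) moves to platform-independent code, leaving only the in-place relocation patching in PatchCodeForDeoptimization. The new SetCallerPc/SetCallerFp hooks are trivial on ia32 because the caller's pc and fp live in ordinary stack slots; a minimal sketch, assuming FrameDescription keeps the frame as a raw slot array and a GetFrameSlotPointer helper exists:

    void FrameDescription::SetFrameSlot(unsigned offset, intptr_t value) {
      *GetFrameSlotPointer(offset) = value;  // write straight into the frame copy
    }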
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 66a7c1c08..8f11acc1b 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -4045,7 +4045,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
+ __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Add (separator length times array_length) - separator length
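
Note: ASCII_STRING_TYPE is replaced by the tag combination it abbreviated. Under the three-part mask above, a string passes only if it is a string, one-byte encoded, and sequential; a hedged sketch (the concrete bit values are V8-internal):

    bool IsSeqOneByteString(int instance_type) {
      int mask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
      int tag  = kStringTag | kOneByteStringTag | kSeqStringTag;
      return (instance_type & mask) == tag;  // all three properties at once
    }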
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 2c234d834..d022a82f4 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -656,9 +656,18 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
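
Note: ToRepresentation lets one helper yield either the raw int32 or the smi encoding of a constant. On 32-bit targets a smi is the value shifted left past a one-bit zero tag, so the smi's bit pattern itself is a valid int32 immediate; a sketch of the encoding the reinterpret_cast relies on:

    int32_t SmiBitPattern(int32_t value) {
      return value << 1;  // kSmiTagSize == 1, kSmiTag == 0 on 32-bit V8
    }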
@@ -1003,12 +1012,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(no_condition, environment, Deoptimizer::SOFT);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1625,6 +1628,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ imul(left, left, constant);
}
} else {
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(left);
+ }
__ imul(left, ToOperand(right));
}
@@ -1661,7 +1667,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
+ int right_operand = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
@@ -1772,7 +1779,8 @@ void LCodeGen::DoSubI(LSubI* instr) {
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToInteger32Immediate(right));
+ __ sub(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
@@ -1842,11 +1850,7 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
AllowDeferredHandleDereference smi_check;
- if (handle->IsHeapObject()) {
- __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
+ __ LoadObject(reg, handle);
}
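
Note: the deleted branch is presumably exactly what MacroAssembler::LoadObject folds together; a sketch under that assumption:

    void MacroAssembler::LoadObject(Register dst, Handle<Object> object) {
      AllowDeferredHandleDereference smi_check;
      if (object->IsHeapObject()) {
        LoadHeapObject(dst, Handle<HeapObject>::cast(object));  // via handle cell
      } else {
        Set(dst, Immediate(object));  // a smi can be embedded directly
      }
    }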
@@ -1985,7 +1989,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
- int32_t offset = ToInteger32(LConstantOperand::cast(right));
+ int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
__ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
@@ -1993,7 +1998,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
+ __ add(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else {
__ add(ToRegister(left), ToOperand(right));
}
@@ -2010,17 +2016,18 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
: greater_equal;
if (right->IsConstantOperand()) {
Operand left_op = ToOperand(left);
- Immediate right_imm = ToInteger32Immediate(right);
- __ cmp(left_op, right_imm);
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+ instr->hydrogen()->representation());
+ __ cmp(left_op, immediate);
__ j(condition, &return_left, Label::kNear);
- __ mov(left_op, right_imm);
+ __ mov(left_op, immediate);
} else {
Register left_reg = ToRegister(left);
Operand right_op = ToOperand(right);
@@ -2388,19 +2395,11 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
- int32_t const_value = ToInteger32(LConstantOperand::cast(right));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToOperand(left), Immediate(Smi::FromInt(const_value)));
- } else {
- __ cmp(ToOperand(left), Immediate(const_value));
- }
+ __ cmp(ToOperand(left),
+ ToImmediate(right, instr->hydrogen()->representation()));
} else if (left->IsConstantOperand()) {
- int32_t const_value = ToInteger32(LConstantOperand::cast(left));
- if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmp(ToOperand(right), Immediate(Smi::FromInt(const_value)));
- } else {
- __ cmp(ToOperand(right), Immediate(const_value));
- }
+ __ cmp(ToOperand(right),
+ ToImmediate(left, instr->hydrogen()->representation()));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
@@ -2426,14 +2425,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- __ cmp(left, instr->hydrogen()->right());
- EmitBranch(instr, equal);
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -3074,11 +3065,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
+void LCodeGen::EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
@@ -3094,9 +3085,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -3145,7 +3136,7 @@ static bool CompactEmit(SmallMapList* list,
if (map->HasElementsTransition()) return false;
LookupResult lookup(isolate);
map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
+ return lookup.IsField() || lookup.IsConstant();
}
@@ -3177,16 +3168,14 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
__ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
} else {
Label next;
bool compact = all_are_compact ? true :
CompactEmit(instr->hydrogen()->types(), name, i, isolate());
__ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
__ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
__ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
__ bind(&next);
}
@@ -3736,38 +3725,30 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
- Label done;
+ Label slow, allocated, done;
Register tmp = input_reg.is(eax) ? ecx : eax;
Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
- Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
+ __ j(zero, &done);
- __ bind(&negative);
-
- Label allocated, slow;
__ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
+ __ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
-
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
instr, instr->context());
-
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
-
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
@@ -3787,9 +3768,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
+ __ j(not_sign, &is_positive, Label::kNear);
+ __ neg(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
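
Note: the test after neg was redundant because neg itself sets the flags. The deopt on 'negative' exists because one int32 value has no representable absolute value:

    #include <cstdint>
    // neg leaves the sign flag set only for INT32_MIN: two's-complement
    // negation of -2147483648 wraps back to -2147483648.
    bool AbsWouldOverflow(int32_t v) { return v == INT32_MIN; }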
@@ -4454,22 +4434,34 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ __ int3();
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment());
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
+ if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsSmi()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(Smi::FromInt(constant_index)));
- } else {
- __ cmp(ToOperand(instr->length()), Immediate(constant_index));
- }
- DeoptimizeIf(below_equal, instr->environment());
+ Immediate immediate =
+ ToImmediate(LConstantOperand::cast(instr->index()),
+ instr->hydrogen()->length()->representation());
+ __ cmp(ToOperand(instr->length()), immediate);
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? below : below_equal;
+ ApplyCheckIf(condition, instr);
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
+ Condition condition =
+ instr->hydrogen()->allow_equality() ? above : above_equal;
+ ApplyCheckIf(condition, instr);
}
}
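
Note: ApplyCheckIf turns a provably redundant bounds check into an int3 trap under --debug-code instead of dropping it silently. The failing condition depends on operand order (the constant path compares length against index, the register path index against length) and on allow_equality; as a sketch:

    Condition BoundsCheckFailure(bool constant_index, bool allow_equality) {
      if (constant_index) return allow_equality ? below : below_equal;
      return allow_equality ? above : above_equal;
    }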
@@ -4629,10 +4621,11 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ mov(operand, ToRegister(instr->value()));
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32(operand_value)) {
- Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
- __ mov(operand, Immediate(smi_value));
+ if (IsSmi(operand_value)) {
+ Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+ __ mov(operand, immediate);
} else {
+ ASSERT(!IsInteger32(operand_value));
Handle<Object> handle_value = ToHandle(operand_value);
__ mov(operand, handle_value);
}
@@ -4795,8 +4788,9 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
+ Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+ Representation::Smi());
+ __ push(immediate);
} else {
Register index = ToRegister(instr->index());
__ SmiTag(index);
@@ -5802,6 +5796,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -5992,6 +5987,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5999,11 +5995,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), instr);
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), instr);
}
}
@@ -6046,6 +6040,23 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
__ bind(deferred->exit());
+
+ if (instr->hydrogen()->MustPrefillWithFiller()) {
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ mov(temp, (size / kPointerSize) - 1);
+ } else {
+ temp = ToRegister(instr->size());
+ __ shr(temp, kPointerSizeLog2);
+ __ dec(temp);
+ }
+ Label loop;
+ __ bind(&loop);
+ __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+ isolate()->factory()->one_pointer_filler_map());
+ __ dec(temp);
+ __ j(not_zero, &loop);
+ }
}
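
Note: when MustPrefillWithFiller() holds, the fresh allocation is immediately covered with one-word filler maps so the heap stays walkable until real initialization runs. The emitted loop stores from the last word down to word 1, apparently leaving word 0 (the map slot) for later; a plain-C++ equivalent, assuming result points at the object start:

    void PrefillWithFiller(intptr_t* result, int size, intptr_t filler_map) {
      for (int i = size / kPointerSize - 1; i != 0; --i) {
        result[i] = filler_map;  // dec / j(not_zero) stops before word 0
      }
    }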
@@ -6306,11 +6317,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment());
- } else {
- DeoptimizeIf(no_condition, instr->environment());
- }
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ DeoptimizeIf(no_condition, instr->environment(), type);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index eb75225b9..657453231 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -109,11 +109,8 @@ class LCodeGen BASE_EMBEDDED {
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
- Immediate ToInteger32Immediate(LOperand* op) const {
- return Immediate(ToInteger32(LConstantOperand::cast(op)));
- }
- Immediate ToSmiImmediate(LOperand* op) const {
- return Immediate(Smi::FromInt(ToInteger32(LConstantOperand::cast(op))));
+ Immediate ToImmediate(LOperand* op, const Representation& r) const {
+ return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
}
double ToDouble(LConstantOperand* op) const;
@@ -283,7 +280,7 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
- void SoftDeoptimize(LEnvironment* environment);
+ void ApplyCheckIf(Condition cc, LBoundsCheck* check);
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -298,7 +295,8 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
X87Register ToX87Register(int index) const;
- int ToInteger32(LConstantOperand* op) const;
+ int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int32_t ToInteger32(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@@ -371,11 +369,11 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
+ void EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index e884a9dbc..b5bc18bdc 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -306,10 +306,10 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Set(dst, cgen_->ToSmiImmediate(constant_source));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
@@ -339,10 +339,10 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsSmi(constant_source)) {
- __ Set(dst, cgen_->ToSmiImmediate(constant_source));
- } else if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ Representation r = cgen_->IsSmi(constant_source)
+ ? Representation::Smi() : Representation::Integer32();
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToImmediate(constant_source, r));
} else {
Register tmp = EnsureTempRegister();
__ LoadObject(tmp, cgen_->ToHandle(constant_source));
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index aebe26b78..f03cd72be 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -754,11 +754,6 @@ LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
}
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -837,8 +832,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
- ASSERT(left->representation().IsSmiOrTagged());
- ASSERT(right->representation().IsSmiOrTagged());
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
@@ -1404,9 +1399,10 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1439,7 +1435,9 @@ LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
+ } else if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
@@ -1455,7 +1453,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1521,9 +1519,10 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
- if (instr->representation().IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(left->representation().IsSmiOrInteger32());
+ ASSERT(right->representation().Equals(left->representation()));
+
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
@@ -1572,9 +1571,9 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstant(instr->BetterRightOperand());
LOperand* temp = NULL;
@@ -1590,16 +1589,17 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
+ ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1618,14 +1618,15 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
// preserve all input values for later uses.
bool use_lea = LAddI::UseLea(instr);
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
HValue* right_candidate = instr->BetterRightOperand();
LOperand* right = use_lea
@@ -1652,9 +1653,10 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1741,13 +1743,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@@ -2063,8 +2058,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
+ LUnallocated* temp = NULL;
+ if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+ if (instr->CanOmitPrototypeChecks()) return result;
return AssignEnvironment(result);
}
@@ -2081,8 +2078,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = NULL;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LCheckMaps* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
return AssignEnvironment(result);
}
@@ -2261,8 +2260,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsSmi());
+ ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index a938ee56b..85c04685b 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -66,6 +66,7 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMaps) \
+ V(CheckMapValue) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@@ -78,20 +79,23 @@ class LCodeGen;
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
V(Context) \
+ V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
+ V(Drop) \
V(DummyUse) \
V(ElementsKind) \
+ V(ForInCacheArray) \
+ V(ForInPrepareMap) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
@@ -99,13 +103,13 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
- V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -118,6 +122,7 @@ class LCodeGen;
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
@@ -180,16 +185,10 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
+ V(Uint32ToDouble) \
V(UnknownOSRValue) \
V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop) \
- V(InnerAllocatedObject)
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -424,6 +423,7 @@ class LDummyUse: public LTemplateInstruction<1, 1, 0> {
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+ DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@@ -857,20 +857,6 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
};
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ef90c10df..2ab5a2593 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1248,6 +1248,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2798,7 +2799,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
// Check that both are flat ASCII strings.
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ const int kFlatAsciiStringTag =
+ kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
and_(scratch1, kFlatAsciiStringMask);
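
Note: the same composed flat-ASCII tag replaces ASCII_STRING_TYPE here. The ASSERT_EQ guarantees the mask does not overlap a copy of itself shifted left by 3, which is what lets both instance-type checks fold into a single compare; roughly:

    bool BothFlatAscii(int type1, int type2, int mask, int tag) {
      int combined = (type1 & mask) | ((type2 & mask) << 3);  // interleave bits
      return combined == (tag | (tag << 3));  // one compare checks both strings
    }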
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 2b391e0b3..123506fa6 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -816,11 +816,9 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Representation representation = details.representation();
ASSERT(!representation.IsNone());
- if (details.type() == CONSTANT_FUNCTION) {
- Handle<HeapObject> constant(
- HeapObject::cast(descriptors->GetValue(descriptor)));
- __ LoadHeapObject(scratch1, constant);
- __ cmp(value_reg, scratch1);
+ if (details.type() == CONSTANT) {
+ Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
+ __ CmpObject(value_reg, constant);
__ j(not_equal, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
@@ -897,7 +895,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- if (details.type() == CONSTANT_FUNCTION) {
+ if (details.type() == CONSTANT) {
ASSERT(value_reg.is(eax));
__ ret(0);
return;
@@ -1428,9 +1426,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
}
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
- __ LoadHeapObject(eax, value);
+ __ LoadObject(eax, value);
__ ret(0);
}
@@ -2727,7 +2725,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
Handle<Code> code = CompileCustomCall(object, holder,
Handle<Cell>::null(),
function, Handle<String>::cast(name),
- Code::CONSTANT_FUNCTION);
+ Code::CONSTANT);
// A null handle means bail out to the regular compiler code below.
if (!code.is_null()) return code;
}