Diffstat (limited to 'chromium/v8/src/ia32')
-rw-r--r--  chromium/v8/src/ia32/assembler-ia32.cc       |   4
-rw-r--r--  chromium/v8/src/ia32/builtins-ia32.cc        |  20
-rw-r--r--  chromium/v8/src/ia32/code-stubs-ia32.cc      | 109
-rw-r--r--  chromium/v8/src/ia32/codegen-ia32.cc         |   4
-rw-r--r--  chromium/v8/src/ia32/debug-ia32.cc           |   2
-rw-r--r--  chromium/v8/src/ia32/deoptimizer-ia32.cc     |   2
-rw-r--r--  chromium/v8/src/ia32/full-codegen-ia32.cc    |  44
-rw-r--r--  chromium/v8/src/ia32/ic-ia32.cc              |   2
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.cc | 449
-rw-r--r--  chromium/v8/src/ia32/lithium-codegen-ia32.h  |  29
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.cc         | 170
-rw-r--r--  chromium/v8/src/ia32/lithium-ia32.h          |  99
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.cc | 126
-rw-r--r--  chromium/v8/src/ia32/macro-assembler-ia32.h  |  21
-rw-r--r--  chromium/v8/src/ia32/stub-cache-ia32.cc      |   4
15 files changed, 592 insertions(+), 493 deletions(-)
diff --git a/chromium/v8/src/ia32/assembler-ia32.cc b/chromium/v8/src/ia32/assembler-ia32.cc
index 7bea3730258..e0ae0066552 100644
--- a/chromium/v8/src/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/ia32/assembler-ia32.cc
@@ -1227,10 +1227,6 @@ void Assembler::test_b(Register reg, const Operand& op) {
void Assembler::test(const Operand& op, const Immediate& imm) {
- if (op.is_reg_only()) {
- test(op.reg(), imm);
- return;
- }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
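Note on the deleted fast path: a register-only Operand used to be routed to the
Register overload of test(), which can pick shorter encodings (the eax short
form, or a byte test for small immediates); with it gone, the generic F7 /0
form is always emitted. A minimal sketch of the size cost for eax with a
32-bit immediate (byte sequences for illustration only):

    // test eax, 0x1  via test(eax, Immediate(1)):          A9 01 00 00 00     (5 bytes)
    // test eax, 0x1  via test(Operand(eax), Immediate(1)): F7 C0 01 00 00 00  (6 bytes)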
diff --git a/chromium/v8/src/ia32/builtins-ia32.cc b/chromium/v8/src/ia32/builtins-ia32.cc
index 59124eab757..b90a17f6c38 100644
--- a/chromium/v8/src/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/ia32/builtins-ia32.cc
@@ -241,7 +241,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ cmp(esi, edi);
__ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
+ "Unexpected number of pre-allocated property fields.");
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
@@ -272,7 +272,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sub(edx, ecx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
- __ Assert(positive, kPropertyAllocationCountFailed);
+ __ Assert(positive, "Property allocation count failed.");
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
@@ -654,7 +654,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ ret(2 * kPointerSize); // Remove state, eax.
__ bind(&not_tos_eax);
- __ Abort(kNoCasesLeft);
+ __ Abort("no cases left");
}
@@ -1033,9 +1033,9 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(not_zero, "Unexpected initial map for InternalArray function");
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+ __ Assert(equal, "Unexpected initial map for InternalArray function");
}
// Run the native code for the InternalArray function called as a normal
@@ -1062,9 +1062,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, "Unexpected initial map for Array function");
__ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, "Unexpected initial map for Array function");
}
// Run the native code for the Array function called as a normal function.
@@ -1092,7 +1092,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
- __ Assert(equal, kUnexpectedStringFunction);
+ __ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
@@ -1137,9 +1137,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
+ __ Assert(equal, "Unexpected string wrapper instance size");
__ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
}
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
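The Assert/Check/Abort edits in this file (and in the files below) all revert
the message argument from a BailoutReason enum value back to a literal C
string. The two MacroAssembler signatures involved, as a sketch:

    void Assert(Condition cc, const char* msg);       // form used after this patch
    void Assert(Condition cc, BailoutReason reason);  // upstream enum form being reverted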
diff --git a/chromium/v8/src/ia32/code-stubs-ia32.cc b/chromium/v8/src/ia32/code-stubs-ia32.cc
index 12cc499a777..5789f49216f 100644
--- a/chromium/v8/src/ia32/code-stubs-ia32.cc
+++ b/chromium/v8/src/ia32/code-stubs-ia32.cc
@@ -250,6 +250,17 @@ void ToBooleanStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -500,8 +511,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
__ cmp(ecx, 0);
- __ Assert(equal, kExpected0AsASmiSentinel);
+ __ Assert(equal, message);
}
__ mov(ecx, GlobalObjectOperand());
__ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
@@ -3457,9 +3469,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
__ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
}
// ecx: RegExp data (FixedArray)
@@ -3819,7 +3831,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test_b(ebx, kIsIndirectStringMask);
- __ Assert(zero, kExternalStringExpectedButNotFound);
+ __ Assert(zero, "external string expected, but not found");
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
@@ -3956,7 +3968,11 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch = scratch2;
// Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+ __ mov(number_string_cache,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
@@ -4310,7 +4326,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
edi);
}
#ifdef DEBUG
- __ Abort(kUnexpectedFallThroughFromStringComparison);
+ __ Abort("Unexpected fall-through from string comparison");
#endif
__ bind(&check_unequal_objects);
@@ -5006,6 +5022,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -5025,11 +5044,18 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
Label miss;
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(function, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
__ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(map, Operand::StaticArray(
+ scratch, times_pointer_size, roots_array_start));
__ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(
+ scratch, times_pointer_size, roots_array_start));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
@@ -5044,8 +5070,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ map);
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+ function);
} else {
// The constants for the code patching are based on no push instructions
// at the call site.
@@ -5055,9 +5085,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
__ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
}
__ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
__ mov(Operand(scratch, 0), map);
@@ -5079,8 +5109,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(0));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Set(eax, Immediate(0));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(scratch,
+ times_pointer_size, roots_array_start), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->true_value());
@@ -5088,7 +5120,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5099,8 +5131,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(
+ scratch, times_pointer_size, roots_array_start), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, factory->false_value());
@@ -5108,7 +5142,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
+ __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
@@ -5221,7 +5255,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5271,7 +5305,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
@@ -5306,7 +5340,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5318,7 +5352,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
}
@@ -5853,7 +5887,11 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
// Load the string table.
Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
+ __ mov(string_table,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
// Calculate capacity mask from the string table capacity.
Register mask = scratch2;
@@ -5941,7 +5979,12 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
if (Serializer::enabled()) {
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ __ mov(scratch, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
__ SmiUntag(scratch);
__ add(scratch, character);
__ mov(hash, scratch);
@@ -7439,7 +7482,7 @@ static void CreateArrayDispatch(MacroAssembler* masm) {
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort("Unexpected ElementsKind in array constructor");
}
@@ -7502,7 +7545,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort("Unexpected ElementsKind in array constructor");
}
@@ -7567,9 +7610,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, "Unexpected initial map for Array function");
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, "Unexpected initial map for Array function");
// We should either have undefined in ebx or a valid cell
Label okay_here;
@@ -7577,7 +7620,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &okay_here);
__ cmp(FieldOperand(ebx, 0), Immediate(cell_map));
- __ Assert(equal, kExpectedPropertyCellInRegisterEbx);
+ __ Assert(equal, "Expected property cell in register ebx");
__ bind(&okay_here);
}
@@ -7681,9 +7724,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(not_zero, "Unexpected initial map for Array function");
__ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+ __ Assert(equal, "Unexpected initial map for Array function");
}
// Figure out the right elements kind
@@ -7702,7 +7745,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ "Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
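Several hunks above open-code what the LoadRoot/CompareRoot/StoreRoot helpers
did: index the isolate's root-list array through an ExternalReference. The two
equivalent forms, sketched for a load (scratch is any free register):

    __ LoadRoot(dst, Heap::kStringTableRootIndex);   // helper form (removed by this patch)

    ExternalReference roots_array_start =            // open-coded replacement
        ExternalReference::roots_array_start(masm->isolate());
    __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
    __ mov(dst, Operand::StaticArray(scratch, times_pointer_size,
                                     roots_array_start));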
diff --git a/chromium/v8/src/ia32/codegen-ia32.cc b/chromium/v8/src/ia32/codegen-ia32.cc
index 28b0f4ad82f..f488718dc6d 100644
--- a/chromium/v8/src/ia32/codegen-ia32.cc
+++ b/chromium/v8/src/ia32/codegen-ia32.cc
@@ -779,7 +779,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
if (FLAG_debug_code) {
__ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, kObjectFoundInSmiOnlyArray);
+ __ Assert(equal, "object found in smi-only array");
}
if (CpuFeatures::IsSupported(SSE2)) {
@@ -1011,7 +1011,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, kExternalStringExpectedButNotFound);
+ __ Assert(zero, "external string expected, but not found");
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
diff --git a/chromium/v8/src/ia32/debug-ia32.cc b/chromium/v8/src/ia32/debug-ia32.cc
index fd703dcc0c7..68199f905b5 100644
--- a/chromium/v8/src/ia32/debug-ia32.cc
+++ b/chromium/v8/src/ia32/debug-ia32.cc
@@ -128,7 +128,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ test(reg, Immediate(0xc0000000));
- __ Assert(zero, kUnableToEncodeValueAsSmi);
+ __ Assert(zero, "Unable to encode value as smi");
}
__ SmiTag(reg);
__ push(reg);
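The 0xc0000000 test guards Smi encoding of a raw register value. On ia32 a Smi
is the value shifted left by one with a zero tag bit, so a word survives the
round trip only if its top two bits are clear. A worked example:

    // 0x2fffffff << 1 == 0x5ffffffe; sar 1 untags back to 0x2fffffff  (ok)
    // 0x40000000 << 1 == 0x80000000; sar 1 yields 0xc0000000          (corrupted)
    __ test(reg, Immediate(0xc0000000));
    __ Assert(zero, "Unable to encode value as smi");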
diff --git a/chromium/v8/src/ia32/deoptimizer-ia32.cc b/chromium/v8/src/ia32/deoptimizer-ia32.cc
index a9bd8c50b72..48968064aa3 100644
--- a/chromium/v8/src/ia32/deoptimizer-ia32.cc
+++ b/chromium/v8/src/ia32/deoptimizer-ia32.cc
@@ -625,7 +625,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ecx);
if (FLAG_debug_code) {
__ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, kAlignmentMarkerExpected);
+ __ Assert(equal, "alignment marker expected");
}
__ bind(&no_padding);
} else {
diff --git a/chromium/v8/src/ia32/full-codegen-ia32.cc b/chromium/v8/src/ia32/full-codegen-ia32.cc
index f08a269e85b..8f11acc1bec 100644
--- a/chromium/v8/src/ia32/full-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/full-codegen-ia32.cc
@@ -745,9 +745,9 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, kDeclarationInWithContext);
+ __ Check(not_equal, "Declaration in with context.");
__ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, kDeclarationInCatchContext);
+ __ Check(not_equal, "Declaration in catch context.");
}
}
@@ -2169,7 +2169,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
- __ Abort(kGeneratorFailedToResume);
+ __ Abort("Generator failed to resume.");
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
@@ -2468,7 +2468,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, kLetBindingReInitialization);
+ __ Check(equal, "Let binding re-initialization.");
}
// Perform the assignment.
__ mov(location, eax);
@@ -3430,15 +3430,15 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register value,
uint32_t encoding_mask) {
__ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiIndex);
+ __ Check(zero, "Non-smi index");
__ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
+ __ Check(zero, "Non-smi value");
__ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, kIndexIsTooLarge);
+ __ Check(less, "Index is too large");
__ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, kIndexIsNegative);
+ __ Check(greater_equal, "Index is negative");
__ push(value);
__ mov(value, FieldOperand(string, HeapObject::kMapOffset));
@@ -3446,7 +3446,7 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
__ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
__ cmp(value, Immediate(encoding_mask));
- __ Check(equal, kUnexpectedStringType);
+ __ Check(equal, "Unexpected string type");
__ pop(value);
}
@@ -3818,7 +3818,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
+ __ Abort("Attempt to use undefined cache.");
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
return;
@@ -4000,7 +4000,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// scratch, string_length, elements.
if (generate_debug_code_) {
__ cmp(index, array_length);
- __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+ __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
@@ -4347,12 +4347,34 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+ break;
+
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+ break;
+
default:
UNREACHABLE();
}
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ Comment cmt(masm_, comment);
+ UnaryOpStub stub(expr->op());
+ // UnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
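The restored EmitUnaryOperation routes unary minus and bitwise-not through
UnaryOpStub as an IC call, so type feedback flows through the UnaryOpIC_Miss
handler registered in code-stubs-ia32.cc above. A sketch of what triggers each
case (JavaScript shown in comments):

    // var a = -b;  =>  Token::SUB      => EmitUnaryOperation(expr, "[ UnaryOperation (SUB)")
    // var c = ~b;  =>  Token::BIT_NOT  => EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)")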
diff --git a/chromium/v8/src/ia32/ic-ia32.cc b/chromium/v8/src/ia32/ic-ia32.cc
index 1e0f14e7687..bf0c80b2b46 100644
--- a/chromium/v8/src/ia32/ic-ia32.cc
+++ b/chromium/v8/src/ia32/ic-ia32.cc
@@ -483,7 +483,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// based on 32 bits of the map pointer and the string hash.
if (FLAG_debug_code) {
__ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, kMapIsNoLongerInEax);
+ __ Check(equal, "Map is no longer in eax.");
}
__ mov(ebx, eax); // Keep the map around for later.
__ shr(eax, KeyedLookupCache::kMapHashShift);
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.cc b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
index 19c553bfa51..7a601cf39bc 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.cc
@@ -113,7 +113,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LCodeGen::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -220,7 +220,7 @@ bool LCodeGen::GeneratePrologue() {
dynamic_frame_alignment_ &&
FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
- __ Assert(zero, kFrameIsExpectedToBeAligned);
+ __ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
@@ -355,8 +355,6 @@ bool LCodeGen::GenerateBody() {
if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
- RecordAndUpdatePosition(instr->position());
-
instr->CompileToNative(this);
if (!CpuFeatures::IsSupported(SSE2)) {
@@ -424,10 +422,6 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
-
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
-
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -769,57 +763,37 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
UNREACHABLE();
}
- int object_index = 0;
- int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
+ }
+ continue;
+ }
+
+ AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ environment->HasUint32ValueAt(i));
}
}
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
+void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
+ bool is_uint32) {
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -908,7 +882,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadObject(esi, Handle<Object>::cast(constant->handle()));
+ __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
} else {
UNREACHABLE();
}
@@ -974,7 +948,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
+ Abort("bailout was not prepared");
return;
}
@@ -1010,7 +984,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&done);
}
- if (info()->ShouldTrapOnDeopt()) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label done;
if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
@@ -1194,14 +1168,6 @@ void LCodeGen::RecordPosition(int position) {
}
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1713,9 +1679,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(left->IsRegister());
if (right->IsConstantOperand()) {
- int32_t right_operand =
- ToRepresentation(LConstantOperand::cast(right),
- instr->hydrogen()->representation());
+ int right_operand = ToRepresentation(LConstantOperand::cast(right),
+ instr->hydrogen()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), right_operand);
@@ -1724,11 +1689,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ or_(ToRegister(left), right_operand);
break;
case Token::BIT_XOR:
- if (right_operand == int32_t(~0)) {
- __ not_(ToRegister(left));
- } else {
- __ xor_(ToRegister(left), right_operand);
- }
+ __ xor_(ToRegister(left), right_operand);
break;
default:
UNREACHABLE();
@@ -2015,7 +1976,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, kUnexpectedStringType);
+ __ Check(equal, "Unexpected string type");
__ pop(value);
}
@@ -2029,6 +1990,13 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+ LOperand* input = instr->value();
+ ASSERT(input->Equals(instr->result()));
+ __ not_(ToRegister(input));
+}
+
+
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->value()));
ASSERT(ToRegister(instr->context()).is(esi));
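The ASSERT in DoBitNotI holds by construction: ia32 `not` is a two-operand
read-modify-write instruction, so the Lithium builder (DoBitNot in
lithium-ia32.cc below) pins the node's result to its input:

    LOperand* input = UseRegisterAtStart(instr->value());
    return DefineSameAsFirst(new(zone()) LBitNotI(input));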
@@ -2233,17 +2201,6 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
- int false_block = instr->FalseDestination(chunk_);
- if (cc == no_condition) {
- __ jmp(chunk_->GetAssemblyLabel(false_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(false_block));
- }
-}
-
-
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32() || r.IsDouble()) {
@@ -2494,51 +2451,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
- if (instr->hydrogen()->representation().IsTagged()) {
- Register input_reg = ToRegister(instr->object());
- __ cmp(input_reg, factory()->the_hole_value());
- EmitBranch(instr, equal);
- return;
- }
-
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ ucomisd(input_reg, input_reg);
- EmitFalseBranch(instr, parity_odd);
- } else {
- // Put the value to the top of stack
- X87Register src = ToX87Register(instr->object());
- X87LoadForUsage(src);
- __ fld(0);
- __ fld(0);
- __ FCmp();
- Label ok;
- __ j(parity_even, &ok);
- __ fstp(0);
- EmitFalseBranch(instr, no_condition);
- __ bind(&ok);
- }
-
-
- __ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
- } else {
- __ fstp_d(MemOperand(esp, 0));
- }
-
- __ add(esp, Immediate(kDoubleSize));
- int offset = sizeof(kHoleNanUpper32);
- __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
- EmitBranch(instr, equal);
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
@@ -2951,7 +2863,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
__ cmp(Operand(esp,
(parameter_count + extra_value_count) * kPointerSize),
Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
+ __ Assert(equal, "expected alignment marker");
}
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
@@ -2964,7 +2876,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
__ cmp(Operand(esp, reg, times_pointer_size,
extra_value_count * kPointerSize),
Immediate(kAlignmentZapValue));
- __ Assert(equal, kExpectedAlignmentMarker);
+ __ Assert(equal, "expected alignment marker");
}
// emit code to restore stack based on instr->parameter_count()
@@ -3178,6 +3090,47 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env) {
+ LookupResult lookup(isolate());
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ mov(result, FieldOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ cmp(FieldOperand(result, HeapObject::kMapOffset),
+ Handle<Map>(current->map()));
+ DeoptimizeIf(not_equal, env);
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
+ __ mov(result, factory()->undefined_value());
+ }
+}
+
+
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@@ -3196,6 +3149,68 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
+// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
+// prototype chain, which causes unbounded code generation.
+static bool CompactEmit(SmallMapList* list,
+ Handle<String> name,
+ int i,
+ Isolate* isolate) {
+ Handle<Map> map = list->at(i);
+ LookupResult lookup(isolate);
+ map->LookupDescriptor(NULL, *name, &lookup);
+ return lookup.IsField() || lookup.IsConstant();
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+
+ int map_count = instr->hydrogen()->types()->length();
+ bool need_generic = instr->hydrogen()->need_generic();
+
+ if (map_count == 0 && !need_generic) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+ Handle<String> name = instr->hydrogen()->name();
+ Label done;
+ bool all_are_compact = true;
+ for (int i = 0; i < map_count; ++i) {
+ if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
+ all_are_compact = false;
+ break;
+ }
+ }
+ for (int i = 0; i < map_count; ++i) {
+ bool last = (i == map_count - 1);
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label check_passed;
+ __ CompareMap(object, map, &check_passed);
+ if (last && !need_generic) {
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
+ } else {
+ Label next;
+ bool compact = all_are_compact ? true :
+ CompactEmit(instr->hydrogen()->types(), name, i, isolate());
+ __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstant(result, object, map, name, instr->environment());
+ __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
+ __ bind(&next);
+ }
+ }
+ if (need_generic) {
+ __ mov(ecx, name);
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ }
+ __ bind(&done);
+}
+
+
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3432,7 +3447,7 @@ Operand LCodeGen::BuildFastArrayOperand(
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
+ Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
@@ -3806,7 +3821,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
- } else if (r.IsSmiOrInteger32()) {
+ } else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
@@ -4148,9 +4163,6 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -4159,9 +4171,6 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -4170,9 +4179,6 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- // Set the context register to a GC-safe fake value. Clobbering it is
- // OK because this instruction is marked as a call.
- __ Set(esi, Immediate(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5045,6 +5051,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
bool use_sse2 = CpuFeatures::IsSupported(SSE2);
if (!use_sse2) {
// Put the value to the top of stack
@@ -5052,6 +5065,54 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
X87LoadForUsage(src);
}
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ ucomisd(input_reg, input_reg);
+ } else {
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ }
+
+ __ j(parity_odd, &no_special_nan_handling);
+ __ sub(esp, Immediate(kDoubleSize));
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(MemOperand(esp, 0), input_reg);
+ } else {
+ __ fld(0);
+ __ fstp_d(MemOperand(esp, 0));
+ }
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize);
+ __ add(esp, Immediate(kDoubleSize));
+ __ mov(reg, factory()->the_hole_value());
+ if (!use_sse2) {
+ __ fstp(0);
+ }
+ __ jmp(&done);
+ __ bind(&canonicalize);
+ __ add(esp, Immediate(kDoubleSize));
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (use_sse2) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(input_reg, Operand::StaticVariable(nan));
+ } else {
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(nan));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
Register tmp = ToRegister(instr->temp());
@@ -5067,6 +5128,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
+ __ bind(&done);
}
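The hole handling added above relies on "the hole" in holey double arrays
being one specific NaN bit pattern. ucomisd of the value against itself only
establishes "some NaN" (parity flag set), so the code spills the double and
inspects its upper half directly. A sketch of the layout being tested
(little-endian):

    __ movdbl(MemOperand(esp, 0), input_reg);          // low 32 bits at esp+0
    __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),   // high 32 bits at esp+4
           Immediate(kHoleNanUpper32));
    // equal     => this NaN is the hole marker: tag the_hole_value instead
    // not equal => canonicalize to the non-hole NaN before boxing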
@@ -5116,21 +5178,23 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
Register temp_reg,
X87Register res_reg,
- bool can_convert_undefined_to_nan,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
X87PrepareToWrite(res_reg);
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@@ -5138,6 +5202,10 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ j(equal, &convert, Label::kNear);
+ __ cmp(input_reg, factory()->the_hole_value());
+ }
DeoptimizeIf(not_equal, env);
__ bind(&convert);
@@ -5184,20 +5252,22 @@ void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
- bool can_convert_undefined_to_nan,
+ bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
Label load_smi, done;
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
+ NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
+ if (!allow_undefined_as_nan) {
DeoptimizeIf(not_equal, env);
} else {
Label heap_number, convert;
@@ -5205,6 +5275,10 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Convert undefined (and hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
+ __ j(equal, &convert, Label::kNear);
+ __ cmp(input_reg, factory()->the_hole_value());
+ }
DeoptimizeIf(not_equal, env);
__ bind(&convert);
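The `mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED` tests above lean on enum order,
which is exactly what the STATIC_ASSERT pins down. A sketch of the relevant
ordering (other members of NumberUntagDMode elided):

    enum NumberUntagDMode {
      ...
      NUMBER_CANDIDATE_IS_ANY_TAGGED,
      NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE  // must compare greater
    };
    if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) { ... }  // covers both tagged modes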
@@ -5529,9 +5603,16 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
HValue* value = instr->hydrogen()->value();
- NumberUntagDMode mode = value->representation().IsSmi()
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ if (value->representation().IsSmi()) {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ } else if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
+ }
+ }
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
@@ -5539,7 +5620,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
- instr->hydrogen()->can_convert_undefined_to_nan(),
+ instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@@ -5547,7 +5628,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagDNoSSE2(input_reg,
temp_reg,
ToX87Register(instr->result()),
- instr->hydrogen()->can_convert_undefined_to_nan(),
+ instr->hydrogen()->allow_undefined_as_nan(),
deoptimize_on_minus_zero,
instr->environment(),
mode);
@@ -5712,68 +5793,31 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- {
- PushSafepointRegistersScope scope(this);
- __ push(object);
- __ xor_(esi, esi);
- __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-
- __ test(eax, Immediate(kSmiTagMask));
- }
- DeoptimizeIf(zero, instr->environment());
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Handle<Map> map,
+ LInstruction* instr) {
+ Label success;
+ __ CompareMap(reg, map, &success);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- virtual void Generate() {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
if (instr->hydrogen()->CanOmitMapChecks()) return;
-
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
Label success;
+ SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
-
Handle<Map> map = map_set->last();
- __ CompareMap(reg, map, &success);
- if (instr->hydrogen()->has_migration_target()) {
- __ j(not_equal, deferred->entry());
- } else {
- DeoptimizeIf(not_equal, instr->environment());
- }
-
+ DoCheckMapCommon(reg, map, instr);
__ bind(&success);
}
@@ -5950,6 +5994,22 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
}
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+ Register reg = ToRegister(instr->temp());
+
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
+
+ ASSERT(prototypes->length() == maps->length());
+
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), instr);
+ }
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
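DoCheckPrototypeMaps pairs with DoCheckMapCommon above: each compile-time
prototype is embedded in the code and its map re-checked at run time, so any
shape change along the chain deoptimizes. The emitted pattern per prototype,
as a sketch (DoCheckMapCommon shown inlined):

    __ LoadHeapObject(reg, prototypes->at(i));        // materialize the prototype
    __ CompareMap(reg, maps->at(i), &success);        // cmp [reg + map offset], expected map
    DeoptimizeIf(not_equal, instr->environment());    // chain changed: bail out
    __ bind(&success);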
@@ -6277,7 +6337,6 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(no_condition, instr->environment(), type);
}
diff --git a/chromium/v8/src/ia32/lithium-codegen-ia32.h b/chromium/v8/src/ia32/lithium-codegen-ia32.h
index aa8f6c248a3..0beef85f0bc 100644
--- a/chromium/v8/src/ia32/lithium-codegen-ia32.h
+++ b/chromium/v8/src/ia32/lithium-codegen-ia32.h
@@ -71,8 +71,7 @@ class LCodeGen BASE_EMBEDDED {
x87_stack_depth_(0),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -164,7 +163,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+
+ void DoCheckMapCommon(Register reg, Handle<Map> map, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -212,7 +212,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
+ void Abort(const char* reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -282,13 +282,10 @@ class LCodeGen BASE_EMBEDDED {
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
+ void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -298,7 +295,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
X87Register ToX87Register(int index) const;
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
@@ -323,14 +320,10 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::DeoptMode mode);
void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
- template<class InstrType>
- void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(
Register input,
Register temp,
@@ -377,6 +370,12 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
+ void EmitLoadFieldOrConstant(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env);
+
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -450,8 +449,6 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
diff --git a/chromium/v8/src/ia32/lithium-ia32.cc b/chromium/v8/src/ia32/lithium-ia32.cc
index b3158685fcf..ea07c5a1991 100644
--- a/chromium/v8/src/ia32/lithium-ia32.cc
+++ b/chromium/v8/src/ia32/lithium-ia32.cc
@@ -487,7 +487,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -645,10 +645,8 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
+ &argument_index_accumulator));
return instr;
}
@@ -700,7 +698,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ Abort("Out of virtual registers while trying to allocate temp register.");
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -940,7 +938,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -956,13 +953,11 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -977,16 +972,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
- LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
+ LOperand* op = NULL;
+ if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
+ op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -997,33 +992,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
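
For orientation: the restored CreateEnvironment translates environments outer-first, threading a single argument-index accumulator through the recursion so stack-argument slots are numbered exactly once across the whole frame chain. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than v8's real HEnvironment/LEnvironment:

    #include <cstdio>

    // Hypothetical stand-ins for HEnvironment / LEnvironment; not
    // v8's real classes.
    struct HEnv { HEnv* outer; int pushed_args; };
    struct LEnv { LEnv* outer; int first_arg_index; };

    // Outer environments are translated first, so argument indices
    // are handed out once, growing from the outermost frame inward.
    LEnv* Translate(HEnv* h, int* argument_index_accumulator) {
      if (h == nullptr) return nullptr;
      LEnv* outer = Translate(h->outer, argument_index_accumulator);
      LEnv* result = new LEnv{outer, *argument_index_accumulator};
      *argument_index_accumulator += h->pushed_args;
      return result;
    }

    int main() {
      HEnv outer_env{nullptr, 2};
      HEnv inner_env{&outer_env, 3};
      int acc = 0;
      LEnv* l = Translate(&inner_env, &acc);
      // Outer frame starts at index 0, inner at 2; 5 slots total.
      std::printf("%d %d %d\n", l->outer->first_arg_index,
                  l->first_arg_index, acc);
      return 0;
    }
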
@@ -1415,8 +1392,9 @@ LInstruction* LChunkBuilder::DoShl(HShl* instr) {
LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1436,6 +1414,16 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ if (instr->HasNoUses()) return NULL;
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LBitNotI* result = new(zone()) LBitNotI(input);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1524,8 +1512,8 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(left->representation().IsSmiOrInteger32());
+ ASSERT(right->representation().Equals(left->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!right->CanBeZero());
@@ -1601,8 +1589,9 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1622,8 +1611,9 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
// Check to see if it would be advantageous to use an lea instruction rather
// than an add. This is the case when no overflow check is needed and there
// are multiple uses of the add's inputs, so using a 3-register add will
@@ -1656,8 +1646,9 @@ LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->right()->representation().Equals(
+ instr->left()->representation()));
left = UseRegisterAtStart(instr->BetterLeftOperand());
right = UseOrConstantAtStart(instr->BetterRightOperand());
} else {
@@ -1712,8 +1703,9 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(r));
- ASSERT(instr->right()->representation().Equals(r));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1743,13 +1735,6 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
- HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
-}
-
-
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsSmiOrTagged());
LOperand* temp = TempRegister();
@@ -1866,6 +1851,17 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -2053,6 +2049,15 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+ LUnallocated* temp = NULL;
+ if (!instr->CanOmitPrototypeChecks()) temp = TempRegister();
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+ if (instr->CanOmitPrototypeChecks()) return result;
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
// If the target is in new space, we'll emit a global cell compare and so
// want the value in a register. If the target gets promoted before we
@@ -2066,16 +2071,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
- }
- return result;
+ if (instr->CanOmitMapChecks()) return result;
+ return AssignEnvironment(result);
}
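
The simplified DoCheckMaps keeps one invariant worth spelling out: an environment (i.e. a deoptimization point) is attached only when a map check is actually emitted. A minimal sketch of that decision, with hypothetical types in place of v8's:

    #include <cassert>

    struct LInstr { bool has_env = false; };  // hypothetical stand-in

    // Stand-in for LChunkBuilder::AssignEnvironment.
    LInstr* AssignEnvironment(LInstr* instr) {
      instr->has_env = true;
      return instr;
    }

    // Mirrors the hunk: an omitted check can never deoptimize, so it
    // carries no environment.
    LInstr* LowerCheckMaps(bool can_omit_map_checks) {
      LInstr* result = new LInstr();
      if (can_omit_map_checks) return result;
      return AssignEnvironment(result);
    }

    int main() {
      assert(!LowerCheckMaps(true)->has_env);
      assert(LowerCheckMaps(false)->has_env);
      return 0;
    }
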
@@ -2207,6 +2206,25 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* obj = UseFixed(instr->object(), edx);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ } else {
+ LOperand* context = UseAny(instr->context()); // Not actually used.
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(context, obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
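
Worth noting in DoLoadNamedFieldPolymorphic above: the generic path is a call, so its operands are pinned to the ia32 IC convention used in the hunk (context in esi, receiver in edx, result in eax), while the monomorphic path leaves register choice to the allocator. A hedged sketch of that split; the record type is hypothetical, only the register names come from the hunk:

    #include <cassert>
    #include <string>

    // Hypothetical constraint record, not v8's LOperand.
    struct Lowering {
      std::string context, object, result;
      bool is_call;
    };

    Lowering LowerPolymorphicLoad(bool need_generic) {
      if (need_generic) {
        // IC call path: registers pinned by the calling convention.
        return {"esi", "edx", "eax", true};
      }
      // Inline path: the register allocator chooses.
      return {"any", "any-register", "any-register", false};
    }

    int main() {
      assert(LowerPolymorphicLoad(true).result == "eax");
      assert(!LowerPolymorphicLoad(false).is_call);
      return 0;
    }
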
@@ -2400,7 +2418,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_external_location = instr->access().IsExternalMemory() &&
instr->access().offset() == 0;
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2549,7 +2567,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2573,12 +2591,6 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
diff --git a/chromium/v8/src/ia32/lithium-ia32.h b/chromium/v8/src/ia32/lithium-ia32.h
index 7ae87a08c8d..6b0f9d0a74c 100644
--- a/chromium/v8/src/ia32/lithium-ia32.h
+++ b/chromium/v8/src/ia32/lithium-ia32.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
+ V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
+ V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
@@ -75,7 +77,6 @@ class LCodeGen;
V(ClassOfTestAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
- V(CmpHoleAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
@@ -128,6 +129,7 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -209,10 +211,7 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
- }
-
+ is_call_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -251,28 +250,19 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
virtual bool ClobbersDoubleRegisters() const {
- return IsCall() ||
+ return is_call_ ||
(!CpuFeatures::IsSupported(SSE2) &&
// We only have rudimentary X87Stack tracking, thus in general
// cannot handle deoptimization nor phi-nodes.
@@ -305,13 +295,10 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
- class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
-
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- int bit_field_;
+ bool is_call_;
};
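
The removed comment above is the interesting part of this hunk: the position (which may be RelocInfo::kNoPosition, i.e. -1) was stored with a +1 bias so the encoded value is always non-negative and fits the unsigned 31-bit PositionBits field packed next to IsCallBits. A standalone sketch of that encoding, assuming a BitField helper shaped like v8's (this mirror is hypothetical, not the real template):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    using IsCallBits = BitField<bool, 0, 1>;
    using PositionBits = BitField<int, 1, 31>;

    int main() {
      const int kNoPosition = -1;  // RelocInfo::kNoPosition
      uint32_t bits = IsCallBits::encode(false);
      // Store position + 1 so kNoPosition encodes as 0 and every
      // stored value is non-negative, fitting the 31-bit field.
      bits = PositionBits::update(bits, kNoPosition + 1);
      assert(PositionBits::decode(bits) - 1 == kNoPosition);
      bits = PositionBits::update(bits, 42 + 1);
      assert(PositionBits::decode(bits) - 1 == 42);
      assert(IsCallBits::decode(bits) == false);
      return 0;
    }
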
@@ -864,20 +851,8 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
-};
-
-
-class LCmpHoleAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpHoleAndBranch(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
@@ -1383,6 +1358,18 @@ class LThrow: public LTemplateInstruction<0, 2, 0> {
};
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1536,6 +1523,21 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic, "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+};
+
+
class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
@@ -2234,7 +2236,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2449,6 +2451,24 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
+ public:
+ explicit LCheckPrototypeMaps(LOperand* temp) {
+ temps_[0] = temp;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
+};
+
+
class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
@@ -2780,7 +2800,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(BailoutReason reason);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2869,8 +2889,7 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.cc b/chromium/v8/src/ia32/macro-assembler-ia32.cc
index 67a7c0d2b49..2ab5a259321 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.cc
@@ -54,60 +54,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- mov(destination, value);
- return;
- }
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(destination, Immediate(index));
- mov(destination, Operand::StaticArray(destination,
- times_pointer_size,
- roots_array_start));
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Register scratch,
- Heap::RootListIndex index) {
- ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(index));
- mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- source);
-}
-
-
-void MacroAssembler::CompareRoot(Register with,
- Register scratch,
- Heap::RootListIndex index) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(index));
- cmp(with, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
void MacroAssembler::InNewSpace(
Register object,
Register scratch,
@@ -486,6 +432,21 @@ void MacroAssembler::SafePush(const Immediate& x) {
}
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
+  // See ROOT_ACCESSOR macro in factory.h.
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+  // See ROOT_ACCESSOR macro in factory.h.
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -717,7 +678,7 @@ void MacroAssembler::AssertNumber(Register object) {
JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- Check(equal, kOperandNotANumber);
+ Check(equal, "Operand not a number");
bind(&ok);
}
}
@@ -726,7 +687,7 @@ void MacroAssembler::AssertNumber(Register object) {
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(equal, kOperandIsNotASmi);
+ Check(equal, "Operand is not a smi");
}
}
@@ -734,12 +695,12 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAString);
+ Check(not_equal, "Operand is a smi and not a string");
push(object);
mov(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
pop(object);
- Check(below, kOperandIsNotAString);
+ Check(below, "Operand is not a string");
}
}
@@ -747,12 +708,12 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmiAndNotAName);
+ Check(not_equal, "Operand is a smi and not a name");
push(object);
mov(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
pop(object);
- Check(below_equal, kOperandIsNotAName);
+ Check(below_equal, "Operand is not a name");
}
}
@@ -760,7 +721,7 @@ void MacroAssembler::AssertName(Register object) {
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
- Check(not_equal, kOperandIsASmi);
+ Check(not_equal, "Operand is a smi");
}
}
@@ -773,7 +734,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(Immediate(CodeObject()));
if (emit_debug_code()) {
cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, kCodeObjectNotProperlyPatched);
+ Check(not_equal, "code object not properly patched");
}
}
@@ -782,7 +743,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
- Check(equal, kStackFrameTypesMustMatch);
+ Check(equal, "stack frame types must match");
}
leave();
}
@@ -1063,7 +1024,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
cmp(scratch1, Immediate(0));
- Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
+ Check(not_equal, "we should not have an empty lexical context");
}
// Load the native context of the current context.
int offset =
@@ -1076,7 +1037,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Read the first word and compare to native_context_map.
cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
}
// Check if both contexts are the same.
@@ -1095,12 +1056,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
cmp(scratch2, isolate()->factory()->null_value());
- Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
+ Check(not_equal, "JSGlobalProxy::context() should not be null.");
// Read the first word and compare to native_context_map().
cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
+ Check(equal, "JSGlobalObject::native_context should be a native context.");
}
int token_offset = Context::kHeaderSize +
@@ -1245,7 +1206,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
cmp(result, Operand::StaticVariable(allocation_top));
- Check(equal, kUnexpectedAllocationTop);
+ Check(equal, "Unexpected allocation top");
#endif
return;
}
@@ -1265,7 +1226,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
AllocationFlags flags) {
if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, kUnalignedAllocationInNewSpace);
+ Check(zero, "Unaligned allocation in new space");
}
ExternalReference allocation_top =
@@ -1497,7 +1458,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, kUndoAllocationOfNonAllocatedMemory);
+ Check(below, "Undo allocation of non allocated memory");
#endif
mov(Operand::StaticVariable(new_space_allocation_top), object);
}
@@ -2101,7 +2062,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
// previous handle scope.
mov(Operand::StaticVariable(next_address), ebx);
sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, kInvalidHandleScopeLevel);
+ Assert(above_equal, "Invalid HandleScope level");
cmp(edi, Operand::StaticVariable(limit_address));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -2143,7 +2104,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
cmp(return_value, isolate()->factory()->null_value());
j(equal, &ok, Label::kNear);
- Abort(kAPICallReturnedInvalidObject);
+ Abort("API call returned invalid object");
bind(&ok);
#endif
@@ -2429,7 +2390,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (emit_debug_code()) {
cmp(FieldOperand(dst, HeapObject::kMapOffset),
isolate()->factory()->with_context_map());
- Check(not_equal, kVariableResolvedToWithContext);
+ Check(not_equal, "Variable resolved to with context.");
}
}
@@ -2516,7 +2477,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
jmp(&ok);
bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
+ Abort("Global functions must have initial map");
bind(&ok);
}
}
@@ -2617,7 +2578,7 @@ void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
and_(eax, kTopMask);
shr(eax, 11);
cmp(eax, Immediate(tos));
- Check(equal, kUnexpectedFPUStackDepthAfterInstruction);
+ Check(equal, "Unexpected FPU stack depth after instruction");
fnclex();
pop(eax);
}
@@ -2700,8 +2661,8 @@ void MacroAssembler::DecrementCounter(Condition cc,
}
-void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
- if (emit_debug_code()) Check(cc, reason);
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+ if (emit_debug_code()) Check(cc, msg);
}
@@ -2718,16 +2679,16 @@ void MacroAssembler::AssertFastElements(Register elements) {
cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
- Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Abort("JSObject with fast elements map has slow elements");
bind(&ok);
}
}
-void MacroAssembler::Check(Condition cc, BailoutReason reason) {
+void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L);
- Abort(reason);
+ Abort(msg);
// will not return here
bind(&L);
}
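
Check()'s shape is easy to misread in assembler form: the jump is taken when the condition holds, so execution falls through into Abort only on failure. The same control flow in plain C++ (a sketch, not v8 code):

    #include <cstdio>
    #include <cstdlib>

    // Same shape as the masm sequence: the "condition holds" branch
    // jumps past the abort; only a failed condition falls through.
    void Check(bool cc, const char* msg) {
      if (cc) return;                            // j(cc, &L)
      std::fprintf(stderr, "abort: %s\n", msg);  // Abort(msg)
      std::abort();                              // will not return
    }

    int main() {
      Check(1 + 1 == 2, "arithmetic is broken");
      std::puts("ok");
      return 0;
    }
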
@@ -2748,13 +2709,12 @@ void MacroAssembler::CheckStackAlignment() {
}
-void MacroAssembler::Abort(BailoutReason reason) {
+void MacroAssembler::Abort(const char* msg) {
// We want to pass the msg string like a smi to avoid GC
// problems; however, msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
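
The comment above describes the whole trick: Abort's message pointer is split into a smi-safe aligned base plus the alignment difference, so the GC never mistakes either value for a heap pointer. A simplified standalone check of that round trip (ia32 smi constants assumed; the real code additionally boxes the delta with Smi::FromInt):

    #include <cassert>
    #include <cstdint>

    int main() {
      // ia32 smi tagging assumed: tag 0 in the low bit.
      const intptr_t kSmiTagMask = 1;
      const intptr_t kSmiTag = 0;

      const char* msg = "example abort message";
      intptr_t p1 = reinterpret_cast<intptr_t>(msg);
      // Aligned base: low bit cleared, so it parses as a valid smi.
      intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
      // Alignment difference (0 or 1), passed separately.
      intptr_t delta = p1 - p0;
      assert(delta == 0 || delta == 1);
      // The callee reassembles the real pointer.
      assert(reinterpret_cast<const char*>(p0 + delta) == msg);
      return 0;
    }
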
@@ -3158,7 +3118,7 @@ void MacroAssembler::EnsureNotWhite(
if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, kLiveBytesCountOverflowChunkSize);
+ Check(less_equal, "Live Bytes Count overflow chunk size");
}
bind(&done);
diff --git a/chromium/v8/src/ia32/macro-assembler-ia32.h b/chromium/v8/src/ia32/macro-assembler-ia32.h
index d537b0b2cbd..3bca930d667 100644
--- a/chromium/v8/src/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/ia32/macro-assembler-ia32.h
@@ -61,15 +61,6 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
- // Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
- void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
- // These methods can only be used with constant roots (i.e. non-writable
- // and not in new space).
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
-
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
@@ -371,6 +362,10 @@ class MacroAssembler: public Assembler {
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
+ // Compare against a known root, e.g. undefined, null, true, ...
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
+
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -812,8 +807,6 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -851,15 +844,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason);
+ void Assert(Condition cc, const char* msg);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason);
+ void Check(Condition cc, const char* msg);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason reason);
+ void Abort(const char* msg);
// Check that the stack is aligned.
void CheckStackAlignment();
diff --git a/chromium/v8/src/ia32/stub-cache-ia32.cc b/chromium/v8/src/ia32/stub-cache-ia32.cc
index df7ad4467f9..123506fa623 100644
--- a/chromium/v8/src/ia32/stub-cache-ia32.cc
+++ b/chromium/v8/src/ia32/stub-cache-ia32.cc
@@ -2479,8 +2479,6 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(eax, &not_smi);
- // Branchless abs implementation, refer to below:
- // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);
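
The deleted link aside, this is the classic branchless integer abs the remaining comment describes: smear the sign bit into a full-width mask, then xor and subtract. In portable C++ (a sketch; right-shifting a negative int is arithmetic on every compiler v8 targets, though formally implementation-defined before C++20):

    #include <cassert>
    #include <cstdint>

    // mask = x >> 31 is all ones for negative x, all zeros otherwise;
    // (x ^ mask) - mask then equals x or -x with no branch.
    int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;
      return (x ^ mask) - mask;
    }

    int main() {
      assert(BranchlessAbs(-5) == 5);
      assert(BranchlessAbs(7) == 7);
      assert(BranchlessAbs(0) == 0);
      return 0;
    }
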
@@ -3155,7 +3153,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ j(equal, &miss);
} else if (FLAG_debug_code) {
__ cmp(eax, factory()->the_hole_value());
- __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+ __ Check(not_equal, "DontDelete cells can't contain the hole");
}
HandlerFrontendFooter(name, &success, &miss);