Diffstat (limited to 'chromium/v8/src/mips')
-rw-r--r--  chromium/v8/src/mips/assembler-mips.h              |   5
-rw-r--r--  chromium/v8/src/mips/builtins-mips.cc              |  20
-rw-r--r--  chromium/v8/src/mips/code-stubs-mips.cc            |  58
-rw-r--r--  chromium/v8/src/mips/codegen-mips.cc               |   6
-rw-r--r--  chromium/v8/src/mips/debug-mips.cc                 |   7
-rw-r--r--  chromium/v8/src/mips/full-codegen-mips.cc          |  43
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.cc       | 285
-rw-r--r--  chromium/v8/src/mips/lithium-codegen-mips.h        |  25
-rw-r--r--  chromium/v8/src/mips/lithium-gap-resolver-mips.cc  |   2
-rw-r--r--  chromium/v8/src/mips/lithium-mips.cc               | 142
-rw-r--r--  chromium/v8/src/mips/lithium-mips.h                |  89
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.cc       |  95
-rw-r--r--  chromium/v8/src/mips/macro-assembler-mips.h        |  26
13 files changed, 452 insertions, 351 deletions
diff --git a/chromium/v8/src/mips/assembler-mips.h b/chromium/v8/src/mips/assembler-mips.h
index cb0896a8ded..8d533b36f40 100644
--- a/chromium/v8/src/mips/assembler-mips.h
+++ b/chromium/v8/src/mips/assembler-mips.h
@@ -358,11 +358,6 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- inline int32_t immediate() const {
- ASSERT(!is_reg());
- return imm32_;
- }
-
Register rm() const { return rm_; }
private:
diff --git a/chromium/v8/src/mips/builtins-mips.cc b/chromium/v8/src/mips/builtins-mips.cc
index d424cbc7261..3f5dca00096 100644
--- a/chromium/v8/src/mips/builtins-mips.cc
+++ b/chromium/v8/src/mips/builtins-mips.cc
@@ -123,10 +123,10 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
+ __ Assert(ne, "Unexpected initial map for InternalArray function",
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
+ __ Assert(eq, "Unexpected initial map for InternalArray function",
t0, Operand(MAP_TYPE));
}
@@ -153,10 +153,10 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ And(t0, a2, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
+ __ Assert(ne, "Unexpected initial map for Array function (1)",
t0, Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
+ __ Assert(eq, "Unexpected initial map for Array function (2)",
t0, Operand(MAP_TYPE));
}
@@ -185,7 +185,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
Register function = a1;
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
+ __ Assert(eq, "Unexpected String function", function, Operand(a2));
}
// Load the first arguments in a0 and get rid of the rest.
@@ -231,10 +231,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ LoadGlobalFunctionInitialMap(function, map, t0);
if (FLAG_debug_code) {
__ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
+ __ Assert(eq, "Unexpected string wrapper instance size",
t0, Operand(JSValue::kSize >> kPointerSizeLog2));
__ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
+ __ Assert(eq, "Unexpected unused properties of string wrapper",
t0, Operand(zero_reg));
}
__ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -489,7 +489,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addu(a0, t5, t0);
// a0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
+ __ Assert(le, "Unexpected number of pre-allocated property fields.",
a0, Operand(t6));
}
__ InitializeFieldsWithFiller(t5, a0, t7);
@@ -522,7 +522,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Done if no extra properties are to be allocated.
__ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, kPropertyAllocationCountFailed,
+ __ Assert(greater_equal, "Property allocation count failed.",
a3, Operand(zero_reg));
// Scale the number of elements by pointer size and add the header for
@@ -569,7 +569,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
__ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t8));
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
}
__ jmp(&entry);
__ bind(&loop);
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 8a03a9a31a5..0e1b224eadf 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -247,6 +247,17 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
+void UnaryOpStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -509,7 +520,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
Label after_sentinel;
__ JumpIfNotSmi(a3, &after_sentinel);
if (FLAG_debug_code) {
- __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ Assert(eq, message, a3, Operand(zero_reg));
}
__ lw(a3, GlobalObjectOperand());
__ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
@@ -667,7 +679,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Label* not_number) {
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
+ "HeapNumberMap register clobbered.");
Label is_smi, done;
@@ -717,7 +729,7 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Label* not_number) {
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
+ "HeapNumberMap register clobbered.");
Label done;
Label not_in_int32_range;
@@ -794,7 +806,7 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ bind(&obj_is_not_smi);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
@@ -841,7 +853,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
- kHeapNumberMapRegisterClobbered);
+ "HeapNumberMap register clobbered.");
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
@@ -4267,12 +4279,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(t0, regexp_data, Operand(kSmiTagMask));
__ Check(nz,
- kUnexpectedTypeForRegExpDataFixedArrayExpected,
+ "Unexpected type for RegExp data, FixedArray expected",
t0,
Operand(zero_reg));
__ GetObjectType(regexp_data, a0, a0);
__ Check(eq,
- kUnexpectedTypeForRegExpDataFixedArrayExpected,
+ "Unexpected type for RegExp data, FixedArray expected",
a0,
Operand(FIXED_ARRAY_TYPE));
}
@@ -4627,7 +4639,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Sequential strings have already been ruled out.
__ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
- kExternalStringExpectedButNotFound,
+ "external string expected, but not found",
at,
Operand(zero_reg));
}
@@ -5008,7 +5020,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
// Index is not a smi.
__ bind(&index_not_smi_);
@@ -5057,7 +5069,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ jmp(&exit_);
- __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
@@ -5094,7 +5106,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
- __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
@@ -5105,7 +5117,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
call_helper.AfterCall(masm);
__ Branch(&exit_);
- __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
}
@@ -5160,7 +5172,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
// that it is.
__ And(scratch4, dest, Operand(kPointerAlignmentMask));
__ Check(eq,
- kDestinationOfCopyNotAligned,
+ "Destination of copy not aligned.",
scratch4,
Operand(zero_reg));
}
@@ -5360,7 +5372,7 @@ void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
// Must be the hole (deleted entry).
if (FLAG_debug_code) {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
+ __ Assert(eq, "oddball in string table is not undefined or the hole",
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -6568,7 +6580,7 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// filled with kZapValue by the GC.
// Dereference the address and check for this.
__ lw(t0, MemOperand(t9));
- __ Assert(ne, kReceivedInvalidReturnAddress, t0,
+ __ Assert(ne, "Received invalid return address.", t0,
Operand(reinterpret_cast<uint32_t>(kZapValue)));
}
__ Jump(t9);
@@ -7319,7 +7331,7 @@ static void CreateArrayDispatch(MacroAssembler* masm) {
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort("Unexpected ElementsKind in array constructor");
}
@@ -7374,7 +7386,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
}
// If we reached this point there is a problem.
- __ Abort(kUnexpectedElementsKindInArrayConstructor);
+ __ Abort("Unexpected ElementsKind in array constructor");
}
@@ -7435,10 +7447,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ And(at, a3, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ __ Assert(ne, "Unexpected initial map for Array function",
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ __ Assert(eq, "Unexpected initial map for Array function",
t0, Operand(MAP_TYPE));
// We should either have undefined in a2 or a valid cell.
@@ -7447,7 +7459,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&okay_here, eq, a2, Operand(at));
__ lw(a3, FieldMemOperand(a2, 0));
- __ Assert(eq, kExpectedPropertyCellInRegisterA2,
+ __ Assert(eq, "Expected property cell in register a2",
a3, Operand(cell_map));
__ bind(&okay_here);
}
@@ -7547,10 +7559,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ And(at, a3, Operand(kSmiTagMask));
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+ __ Assert(ne, "Unexpected initial map for Array function",
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+ __ Assert(eq, "Unexpected initial map for Array function",
t0, Operand(MAP_TYPE));
}
@@ -7567,7 +7579,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
Label done;
__ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
__ Assert(
- eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+ eq, "Invalid ElementsKind for InternalArray or InternalPackedArray",
a3, Operand(FAST_HOLEY_ELEMENTS));
__ bind(&done);
}
diff --git a/chromium/v8/src/mips/codegen-mips.cc b/chromium/v8/src/mips/codegen-mips.cc
index 5c847fc8f62..3f74154f58a 100644
--- a/chromium/v8/src/mips/codegen-mips.cc
+++ b/chromium/v8/src/mips/codegen-mips.cc
@@ -205,7 +205,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
__ sll(scratch, t1, 2);
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
+ __ Allocate(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
// t2: destination FixedDoubleArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
@@ -289,7 +289,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiTag(t5);
__ Or(t5, t5, Operand(1));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
+ __ Assert(eq, "object found in smi-only array", at, Operand(t5));
}
__ sw(t0, MemOperand(t3)); // mantissa
__ sw(t1, MemOperand(t3, kIntSize)); // exponent
@@ -489,7 +489,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, result, Operand(kIsIndirectStringMask));
- __ Assert(eq, kExternalStringExpectedButNotFound,
+ __ Assert(eq, "external string expected, but not found",
at, Operand(zero_reg));
}
// Rule out short external strings.
diff --git a/chromium/v8/src/mips/debug-mips.cc b/chromium/v8/src/mips/debug-mips.cc
index 020228fc6b5..30cc4db6340 100644
--- a/chromium/v8/src/mips/debug-mips.cc
+++ b/chromium/v8/src/mips/debug-mips.cc
@@ -142,7 +142,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ And(at, reg, 0xc0000000);
- __ Assert(eq, kUnableToEncodeValueAsSmi, at, Operand(zero_reg));
+ __ Assert(
+ eq, "Unable to encode value as smi", at, Operand(zero_reg));
}
__ sll(reg, reg, kSmiTagSize);
}
@@ -324,12 +325,12 @@ void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
+ masm->Abort("LiveEdit frame dropping is not supported on mips");
}
diff --git a/chromium/v8/src/mips/full-codegen-mips.cc b/chromium/v8/src/mips/full-codegen-mips.cc
index b60502c9a5b..1084af09298 100644
--- a/chromium/v8/src/mips/full-codegen-mips.cc
+++ b/chromium/v8/src/mips/full-codegen-mips.cc
@@ -786,10 +786,10 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
- __ Check(ne, kDeclarationInWithContext,
+ __ Check(ne, "Declaration in with context.",
a1, Operand(t0));
__ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
- __ Check(ne, kDeclarationInCatchContext,
+ __ Check(ne, "Declaration in catch context.",
a1, Operand(t0));
}
}
@@ -2529,7 +2529,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
+ __ Check(eq, "Let binding re-initialization.", a2, Operand(t0));
}
// Perform the assignment.
__ sw(v0, location);
@@ -3492,21 +3492,21 @@ void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register value,
uint32_t encoding_mask) {
__ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+ __ Check(eq, "Non-smi index", at, Operand(zero_reg));
__ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+ __ Check(eq, "Non-smi value", at, Operand(zero_reg));
__ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, kIndexIsTooLarge, index, Operand(at));
+ __ Check(lt, "Index is too large", index, Operand(at));
- __ Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+ __ Check(ge, "Index is negative", index, Operand(zero_reg));
__ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
__ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
__ Subu(at, at, Operand(encoding_mask));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
}
@@ -3881,7 +3881,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Handle<FixedArray> jsfunction_result_caches(
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort(kAttemptToUseUndefinedCache);
+ __ Abort("Attempt to use undefined cache.");
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
context()->Plug(v0);
return;
@@ -4063,7 +4063,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
+ __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
array_length, Operand(zero_reg));
}
__ bind(&loop);
@@ -4382,12 +4382,35 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
}
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+ break;
+
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+ break;
+
default:
UNREACHABLE();
}
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ UnaryOpStub stub(expr->op());
+ // GenericUnaryOpStub expects the argument to be in a0.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ __ mov(a0, result_register());
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
+ expr->UnaryOperationFeedbackId());
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.cc b/chromium/v8/src/mips/lithium-codegen-mips.cc
index 2bc52e4f3ff..88e7eb8f1d4 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.cc
+++ b/chromium/v8/src/mips/lithium-codegen-mips.cc
@@ -91,7 +91,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LChunkBuilder::Abort(BailoutReason reason) {
+void LChunkBuilder::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -268,8 +268,6 @@ bool LCodeGen::GenerateBody() {
instr->Mnemonic());
}
- RecordAndUpdatePosition(instr->position());
-
instr->CompileToNative(this);
}
EnsureSpaceForLazyDeopt();
@@ -283,10 +281,6 @@ bool LCodeGen::GenerateDeferredCode() {
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
-
- int pos = instructions_->at(code->instruction_index())->position();
- RecordAndUpdatePosition(pos);
-
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
@@ -330,7 +324,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
// end of the jump table.
if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
deopt_jump_table_.length() * 12)) {
- Abort(kGeneratedCodeIsTooLarge);
+ Abort("Generated code is too large");
}
if (deopt_jump_table_.length() > 0) {
@@ -417,7 +411,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
ASSERT(constant->HasSmiValue());
__ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
} else if (r.IsDouble()) {
- Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+ Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
__ LoadObject(scratch, literal);
@@ -455,9 +449,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
__ cvt_d_w(dbl_scratch, flt_scratch);
return dbl_scratch;
} else if (r.IsDouble()) {
- Abort(kUnsupportedDoubleImmediate);
+ Abort("unsupported double immediate");
} else if (r.IsTagged()) {
- Abort(kUnsupportedTaggedImmediate);
+ Abort("unsupported tagged immediate");
}
} else if (op->IsStackSlot() || op->IsArgument()) {
MemOperand mem_op = ToMemOperand(op);
@@ -526,14 +520,14 @@ Operand LCodeGen::ToOperand(LOperand* op) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
- Abort(kToOperandUnsupportedDoubleImmediate);
+ Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
- Abort(kToOperandIsDoubleRegisterUnimplemented);
+ Abort("ToOperand IsDoubleRegister unimplemented");
return Operand(0);
}
// Stack slots not implemented, use ToMemOperand instead.
@@ -597,57 +591,37 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
break;
}
- int object_index = 0;
- int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
- AddToTranslation(environment,
- translation,
+
+ // TODO(mstarzinger): Introduce marker operands to indicate that this value
+ // is not present and must be reconstructed from the deoptimizer. Currently
+ // this is only used for the arguments object.
+ if (value == NULL) {
+ int arguments_count = environment->values()->length() - translation_size;
+ translation->BeginArgumentsObject(arguments_count);
+ for (int i = 0; i < arguments_count; ++i) {
+ LOperand* value = environment->values()->at(translation_size + i);
+ AddToTranslation(translation,
+ value,
+ environment->HasTaggedValueAt(translation_size + i),
+ environment->HasUint32ValueAt(translation_size + i));
+ }
+ continue;
+ }
+
+ AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- &object_index,
- &dematerialized_index);
+ environment->HasUint32ValueAt(i));
}
}
-void LCodeGen::AddToTranslation(LEnvironment* environment,
- Translation* translation,
+void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer) {
- if (op == LEnvironment::materialization_marker()) {
- int object_index = (*object_index_pointer)++;
- if (environment->ObjectIsDuplicateAt(object_index)) {
- int dupe_of = environment->ObjectDuplicateOfAt(object_index);
- translation->DuplicateObject(dupe_of);
- return;
- }
- int object_length = environment->ObjectLengthAt(object_index);
- if (environment->ObjectIsArgumentsAt(object_index)) {
- translation->BeginArgumentsObject(object_length);
- } else {
- translation->BeginCapturedObject(object_length);
- }
- int dematerialized_index = *dematerialized_index_pointer;
- int env_offset = environment->translation_size() + dematerialized_index;
- *dematerialized_index_pointer += object_length;
- for (int i = 0; i < object_length; ++i) {
- LOperand* value = environment->values()->at(env_offset + i);
- AddToTranslation(environment,
- translation,
- value,
- environment->HasTaggedValueAt(env_offset + i),
- environment->HasUint32ValueAt(env_offset + i),
- object_index_pointer,
- dematerialized_index_pointer);
- }
- return;
- }
-
+ bool is_uint32) {
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -774,7 +748,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
- Abort(kBailoutWasNotPrepared);
+ Abort("bailout was not prepared");
return;
}
@@ -787,7 +761,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
return;
}
- if (info()->ShouldTrapOnDeopt()) {
+ if (FLAG_trap_on_deopt && info()->IsOptimizing()) {
Label skip;
if (cc != al) {
__ Branch(&skip, NegateCondition(cc), src1, src2);
@@ -986,14 +960,6 @@ void LCodeGen::RecordPosition(int position) {
}
-void LCodeGen::RecordAndUpdatePosition(int position) {
- if (position >= 0 && position != old_position_) {
- masm()->positions_recorder()->RecordPosition(position);
- old_position_ = position;
- }
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1536,11 +1502,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
__ Or(result, left, right);
break;
case Token::BIT_XOR:
- if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
- __ Nor(result, zero_reg, left);
- } else {
- __ Xor(result, left, right);
- }
+ __ Xor(result, left, right);
break;
default:
UNREACHABLE();
@@ -1801,7 +1763,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
}
__ Addu(scratch,
@@ -1818,6 +1780,13 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
}
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ Nor(result, zero_reg, Operand(input));
+}
+
+
void LCodeGen::DoThrow(LThrow* instr) {
Register input_reg = EmitLoadRegister(instr->value(), at);
__ push(input_reg);
@@ -2921,6 +2890,90 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env) {
+ LookupResult lookup(isolate());
+ type->LookupDescriptor(NULL, *name, &lookup);
+ ASSERT(lookup.IsFound() || lookup.IsCacheable());
+ if (lookup.IsField()) {
+ int index = lookup.GetLocalFieldIndexFromMap(*type);
+ int offset = index * kPointerSize;
+ if (index < 0) {
+ // Negative property indices are in-object properties, indexed
+ // from the end of the fixed part of the object.
+ __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
+ } else {
+ // Non-negative property indices are in the properties array.
+ __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+ }
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
+ } else {
+ // Negative lookup.
+ // Check prototypes.
+ Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
+ Heap* heap = type->GetHeap();
+ while (*current != heap->null_value()) {
+ __ LoadHeapObject(result, current);
+ __ lw(result, FieldMemOperand(result, HeapObject::kMapOffset));
+ DeoptimizeIf(ne, env, result, Operand(Handle<Map>(current->map())));
+ current =
+ Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
+ }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ }
+}
+
+
+void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ Register object_map = scratch0();
+
+ int map_count = instr->hydrogen()->types()->length();
+ bool need_generic = instr->hydrogen()->need_generic();
+
+ if (map_count == 0 && !need_generic) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+ Handle<String> name = instr->hydrogen()->name();
+ Label done;
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ for (int i = 0; i < map_count; ++i) {
+ bool last = (i == map_count - 1);
+ Handle<Map> map = instr->hydrogen()->types()->at(i);
+ Label check_passed;
+ __ CompareMapAndBranch(object_map, map, &check_passed, eq, &check_passed);
+ if (last && !need_generic) {
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ } else {
+ Label next;
+ __ Branch(&next);
+ __ bind(&check_passed);
+ EmitLoadFieldOrConstantFunction(
+ result, object, map, name, instr->environment());
+ __ Branch(&done);
+ __ bind(&next);
+ }
+ }
+ if (need_generic) {
+ __ li(a2, Operand(name));
+ Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ }
+ __ bind(&done);
+}
+
+
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a0));
ASSERT(ToRegister(instr->result()).is(v0));
@@ -3016,7 +3069,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
+ Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
@@ -3102,7 +3155,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
+ Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
@@ -3373,7 +3426,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort(kDoPushArgumentNotImplementedForDoubleType);
+ Abort("DoPushArgument not implemented for double type.");
} else {
Register argument_reg = EmitLoadRegister(argument, at);
__ push(argument_reg);
@@ -3592,7 +3645,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
FPURegister input = ToDoubleRegister(instr->value());
FPURegister result = ToDoubleRegister(instr->result());
__ abs_d(result, input);
- } else if (r.IsSmiOrInteger32()) {
+ } else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else {
// Representation is tagged.
@@ -4198,7 +4251,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
+ Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
@@ -4276,7 +4329,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
- Abort(kArrayIndexConstantValueTooBig);
+ Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
@@ -4405,13 +4458,12 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
} else {
- PushSafepointRegistersScope scope(
- this, Safepoint::kWithRegistersAndDoubles);
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ mov(a0, object_reg);
__ li(a1, Operand(to_map));
TransitionElementsKindStub stub(from_kind, to_kind);
__ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(
+ RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
}
__ bind(&not_applicable);
@@ -4717,7 +4769,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Move(reg, scratch0(), input_reg);
Label canonicalize;
__ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
- __ li(reg, factory()->undefined_value());
+ __ li(reg, factory()->the_hole_value());
__ Branch(&done);
__ bind(&canonicalize);
__ Move(input_reg,
@@ -5122,7 +5174,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
- Handle<Cell> cell = isolate()->factory()->NewCell(target);
+ Handle<Cell> cell = isolate()->factory()->NewPropertyCell(target);
__ li(at, Operand(Handle<Object>(cell)));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr->environment(), reg,
@@ -5134,63 +5186,31 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
-void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ push(object);
- CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
- __ StoreToSafepointRegisterSlot(v0, scratch0());
- }
- __ And(at, scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+void LCodeGen::DoCheckMapCommon(Register map_reg,
+ Handle<Map> map,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+ DeoptimizeIf(al, env);
+ __ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- class DeferredCheckMaps: public LDeferredCode {
- public:
- DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
- : LDeferredCode(codegen), instr_(instr), object_(object) {
- SetExit(check_maps());
- }
- virtual void Generate() {
- codegen()->DoDeferredInstanceMigration(instr_, object_);
- }
- Label* check_maps() { return &check_maps_; }
- virtual LInstruction* instr() { return instr_; }
- private:
- LCheckMaps* instr_;
- Label check_maps_;
- Register object_;
- };
-
if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
+ Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- DeferredCheckMaps* deferred = NULL;
- if (instr->hydrogen()->has_migration_target()) {
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
- __ bind(deferred->check_maps());
- }
-
- Label success;
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
Handle<Map> map = map_set->last();
- // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
- if (instr->hydrogen()->has_migration_target()) {
- __ Branch(deferred->entry(), ne, map_reg, Operand(map));
- } else {
- DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
- }
-
+ DoCheckMapCommon(map_reg, map, instr->environment());
__ bind(&success);
}
@@ -5245,6 +5265,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
+ Register prototype_reg = ToRegister(instr->temp());
+ Register map_reg = ToRegister(instr->temp2());
+
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
+
+ ASSERT(prototypes->length() == maps->length());
+
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
+ }
+}
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@@ -5625,8 +5664,6 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
-
- Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
diff --git a/chromium/v8/src/mips/lithium-codegen-mips.h b/chromium/v8/src/mips/lithium-codegen-mips.h
index b97a3cdbaf1..a485b67db94 100644
--- a/chromium/v8/src/mips/lithium-codegen-mips.h
+++ b/chromium/v8/src/mips/lithium-codegen-mips.h
@@ -65,8 +65,7 @@ class LCodeGen BASE_EMBEDDED {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- old_position_(RelocInfo::kNoPosition) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -115,7 +114,7 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
- int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+ int ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
@@ -154,7 +153,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -214,7 +213,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- void Abort(BailoutReason reason);
+ void Abort(const char* reason);
void FPRINTF_CHECKING Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@@ -291,13 +290,10 @@ class LCodeGen BASE_EMBEDDED {
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void AddToTranslation(LEnvironment* environment,
- Translation* translation,
+ void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32,
- int* object_index_pointer,
- int* dematerialized_index_pointer);
+ bool is_uint32);
void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -323,7 +319,6 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
Safepoint::DeoptMode mode);
void RecordPosition(int position);
- void RecordAndUpdatePosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@@ -378,6 +373,12 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
+ void EmitLoadFieldOrConstantFunction(Register result,
+ Register object,
+ Handle<Map> type,
+ Handle<String> name,
+ LEnvironment* env);
+
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -434,8 +435,6 @@ class LCodeGen BASE_EMBEDDED {
Safepoint::Kind expected_safepoint_kind_;
- int old_position_;
-
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
index 460e13bf0a9..771b22862ee 100644
--- a/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/chromium/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -258,7 +258,7 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
- } else if (destination->IsDoubleRegister()) {
+ } else if (source->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Move(result, v);
diff --git a/chromium/v8/src/mips/lithium-mips.cc b/chromium/v8/src/mips/lithium-mips.cc
index 23f48a7709d..760be2e6e85 100644
--- a/chromium/v8/src/mips/lithium-mips.cc
+++ b/chromium/v8/src/mips/lithium-mips.cc
@@ -442,7 +442,7 @@ LPlatformChunk* LChunkBuilder::Build() {
}
-void LCodeGen::Abort(BailoutReason reason) {
+void LCodeGen::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@@ -598,10 +598,8 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
- ZoneList<HValue*> objects_to_materialize(0, zone());
instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator,
- &objects_to_materialize));
+ &argument_index_accumulator));
return instr;
}
@@ -652,7 +650,7 @@ LUnallocated* LChunkBuilder::TempRegister() {
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
int vreg = allocator_->GetVirtualRegister();
if (!allocator_->AllocationOk()) {
- Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+ Abort("Out of virtual registers while trying to allocate temp register.");
vreg = 0;
}
operand->set_virtual_register(vreg);
@@ -890,7 +888,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
#endif
- instr->set_position(position_);
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
@@ -906,13 +903,11 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize) {
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
- argument_index_accumulator,
- objects_to_materialize);
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
@@ -927,16 +922,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
outer,
hydrogen_env->entry(),
zone());
+ bool needs_arguments_object_materialization = false;
int argument_index = *argument_index_accumulator;
- int object_index = objects_to_materialize->length();
for (int i = 0; i < hydrogen_env->length(); ++i) {
if (hydrogen_env->is_special_index(i)) continue;
- LOperand* op;
HValue* value = hydrogen_env->values()->at(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
+ LOperand* op = NULL;
+ if (value->IsArgumentsObject()) {
+ needs_arguments_object_materialization = true;
+ op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument(argument_index++);
} else {
@@ -947,33 +942,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
value->CheckFlag(HInstruction::kUint32));
}
- for (int i = object_index; i < objects_to_materialize->length(); ++i) {
- HValue* object_to_materialize = objects_to_materialize->at(i);
- int previously_materialized_object = -1;
- for (int prev = 0; prev < i; ++prev) {
- if (objects_to_materialize->at(prev) == objects_to_materialize->at(i)) {
- previously_materialized_object = prev;
- break;
- }
- }
- int length = object_to_materialize->OperandCount();
- bool is_arguments = object_to_materialize->IsArgumentsObject();
- if (previously_materialized_object >= 0) {
- result->AddDuplicateObject(previously_materialized_object);
- continue;
- } else {
- result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
- }
- for (int i = is_arguments ? 1 : 0; i < length; ++i) {
- LOperand* op;
- HValue* value = object_to_materialize->OperandAt(i);
- if (value->IsArgumentsObject() || value->IsCapturedObject()) {
- objects_to_materialize->Add(value, zone());
- op = LEnvironment::materialization_marker();
- } else {
- ASSERT(!value->IsPushArgument());
- op = UseAny(value);
- }
+ if (needs_arguments_object_materialization) {
+ HArgumentsObject* arguments = hydrogen_env->entry() == NULL
+ ? graph()->GetArgumentsObject()
+ : hydrogen_env->entry()->arguments_object();
+ ASSERT(arguments->IsLinked());
+ for (int i = 1; i < arguments->arguments_count(); ++i) {
+ HValue* value = arguments->arguments_values()->at(i);
+ ASSERT(!value->IsArgumentsObject() && !value->IsPushArgument());
+ LOperand* op = UseAny(value);
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
@@ -1350,6 +1327,15 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->representation().IsInteger32());
+ if (instr->HasNoUses()) return NULL;
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new(zone()) LBitNotI(value));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
@@ -1628,8 +1614,9 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(r));
- ASSERT(instr->right()->representation().Equals(r));
+ ASSERT(instr->left()->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(
+ instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1764,6 +1751,17 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
}
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1937,6 +1935,19 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
}
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+ LUnallocated* temp1 = NULL;
+ LOperand* temp2 = NULL;
+ if (!instr->CanOmitPrototypeChecks()) {
+ temp1 = TempRegister();
+ temp2 = TempRegister();
+ }
+ LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ if (instr->CanOmitPrototypeChecks()) return result;
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1945,16 +1956,10 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = NULL;
- if (!instr->CanOmitMapChecks()) {
- value = UseRegisterAtStart(instr->value());
- if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
- }
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- if (!instr->CanOmitMapChecks()) {
- AssignEnvironment(result);
- if (instr->has_migration_target()) return AssignPointerMap(result);
- }
- return result;
+ if (!instr->CanOmitMapChecks()) value = UseRegisterAtStart(instr->value());
+ LInstruction* result = new(zone()) LCheckMaps(value);
+ if (instr->CanOmitMapChecks()) return result;
+ return AssignEnvironment(result);
}
@@ -2066,6 +2071,23 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
+LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
+ HLoadNamedFieldPolymorphic* instr) {
+ ASSERT(instr->representation().IsTagged());
+ if (instr->need_generic()) {
+ LOperand* obj = UseFixed(instr->object(), a0);
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+ } else {
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LLoadNamedFieldPolymorphic* result =
+ new(zone()) LLoadNamedFieldPolymorphic(obj);
+ return AssignEnvironment(DefineAsRegister(result));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), a0);
LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
@@ -2221,7 +2243,7 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = instr->has_transition() &&
+ bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
@@ -2341,7 +2363,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
- Abort(kTooManySpillSlotsNeededForOSR);
+ Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
@@ -2363,12 +2385,6 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
}
-LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
- // There are no real uses of a captured object.
- return NULL;
-}
-
-
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
info()->MarkAsRequiresFrame();
LOperand* args = UseRegister(instr->arguments());
diff --git a/chromium/v8/src/mips/lithium-mips.h b/chromium/v8/src/mips/lithium-mips.h
index a1792b17b22..44c909ea766 100644
--- a/chromium/v8/src/mips/lithium-mips.h
+++ b/chromium/v8/src/mips/lithium-mips.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(ArithmeticD) \
V(ArithmeticT) \
V(BitI) \
+ V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CheckMaps) \
V(CheckMapValue) \
V(CheckNonSmi) \
+ V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
@@ -126,6 +128,7 @@ class LCodeGen;
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
+ V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
@@ -204,12 +207,9 @@ class LCodeGen;
class LInstruction: public ZoneObject {
public:
LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- bit_field_(IsCallBits::encode(false)) {
- set_position(RelocInfo::kNoPosition);
- }
-
+ : environment_(NULL),
+ hydrogen_value_(NULL),
+ is_call_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -248,30 +248,20 @@ class LInstruction: public ZoneObject {
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- // The 31 bits PositionBits is used to store the int position value. And the
- // position value may be RelocInfo::kNoPosition (-1). The accessor always
- // +1/-1 so that the encoded value of position in bit_field_ is always >= 0
- // and can fit into the 31 bits PositionBits.
- void set_position(int pos) {
- bit_field_ = PositionBits::update(bit_field_, pos + 1);
- }
- int position() { return PositionBits::decode(bit_field_) - 1; }
-
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
- void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
- bool IsCall() const { return IsCallBits::decode(bit_field_); }
+ void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return IsCall(); }
- bool ClobbersRegisters() const { return IsCall(); }
- bool ClobbersDoubleRegisters() const { return IsCall(); }
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return IsCall(); }
+ bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() const = 0;
@@ -295,13 +285,10 @@ class LInstruction: public ZoneObject {
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
- class IsCallBits: public BitField<bool, 0, 1> {};
- class PositionBits: public BitField<int, 1, 31> {};
-
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- int bit_field_;
+ bool is_call_;
};
@@ -1369,6 +1356,18 @@ class LThrow: public LTemplateInstruction<0, 1, 0> {
};
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
@@ -1504,6 +1503,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LLoadNamedFieldPolymorphic(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
+ DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
+};
+
+
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@@ -2116,7 +2128,7 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream);
- Handle<Map> transition() const { return hydrogen()->transition_map(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
}
@@ -2312,6 +2324,26 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
+ public:
+ LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+ DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
+
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
+};
+
+
class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
@@ -2610,7 +2642,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
- void Abort(BailoutReason reason);
+ void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@@ -2692,8 +2724,7 @@ class LChunkBuilder BASE_EMBEDDED {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator,
- ZoneList<HValue*>* objects_to_materialize);
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/chromium/v8/src/mips/macro-assembler-mips.cc b/chromium/v8/src/mips/macro-assembler-mips.cc
index e53f10afaca..ea08a552be5 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/mips/macro-assembler-mips.cc
@@ -256,7 +256,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
lw(at, MemOperand(address));
Assert(
- eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
+ eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
}
Label done;
@@ -358,7 +358,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
+ Check(ne, "we should not have an empty lexical context",
scratch, Operand(zero_reg));
#endif
@@ -374,7 +374,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
@@ -388,12 +388,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull,
+ Check(ne, "JSGlobalProxy::context() should not be null.",
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+ Check(eq, "JSGlobalObject::native_context should be a native context.",
holder_reg, Operand(at));
// Restore at is not needed. at is reloaded below.
pop(holder_reg); // Restore holder.
@@ -2923,7 +2923,9 @@ void MacroAssembler::Allocate(int object_size,
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
+ Register obj_size_reg = scratch2;
li(topaddr, Operand(allocation_top));
+ li(obj_size_reg, Operand(object_size));
// This code stores a temporary value in t9.
if ((flags & RESULT_CONTAINS_TOP) == 0) {
@@ -2936,29 +2938,15 @@ void MacroAssembler::Allocate(int object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
- Addu(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
- }
-
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(object_size));
+ Addu(scratch2, result, Operand(obj_size_reg));
Branch(gc_required, Ugreater, scratch2, Operand(t9));
sw(scratch2, MemOperand(topaddr));
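
Both Allocate variants emit the same bump-pointer fast path: add the object
size to the current allocation top, branch to gc_required when the result
passes the limit held in t9, and otherwise store the new top. A standalone
model assuming exactly those semantics:

#include <cstdint>

bool TryBumpAllocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                     uintptr_t* result) {
  uintptr_t new_top = *top + size;
  if (new_top > limit) return false;  // Branch(gc_required, Ugreater, ...)
  *result = *top;                     // result keeps the old allocation top
  *top = new_top;                     // sw(scratch2, MemOperand(topaddr))
  return true;
}
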
@@ -3020,26 +3008,12 @@ void MacroAssembler::Allocate(Register object_size,
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
}
// Load allocation limit into t9. Result already contains allocation top.
lw(t9, MemOperand(topaddr, limit - top));
}
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
- Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
- Addu(result, result, Operand(kDoubleSize / 2));
- bind(&aligned);
- }
-
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -3054,7 +3028,7 @@ void MacroAssembler::Allocate(Register object_size,
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
}
sw(scratch2, MemOperand(topaddr));
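
The DOUBLE_ALIGNMENT blocks removed above kept doubles 8-byte aligned by
planting a one-word filler map whenever the allocation top was only 4-byte
aligned (kPointerAlignment * 2 == kDoubleAlignment on MIPS32). A standalone
sketch of that filler step, assuming those constants:

#include <cstdint>

const uintptr_t kDoubleAlignment = 8;
const uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

uintptr_t AlignAllocationForDouble(uintptr_t top,
                                   void (*write_filler_map)(uintptr_t)) {
  if ((top & kDoubleAlignmentMask) != 0) {
    top += kDoubleAlignment / 2;      // Addu(result, result, kDoubleSize / 2)
    write_filler_map(top - kDoubleAlignment / 2);  // sw(filler_map, result)
  }
  return top;
}
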
@@ -3076,7 +3050,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
// Check that the object un-allocated is below the current top.
li(scratch, Operand(new_space_allocation_top));
lw(scratch, MemOperand(scratch));
- Check(less, kUndoAllocationOfNonAllocatedMemory,
+ Check(less, "Undo allocation of non allocated memory",
object, Operand(scratch));
#endif
// Write the address of the object to un-allocate as the current top.
@@ -3329,7 +3303,7 @@ void MacroAssembler::CopyBytes(Register src,
bind(&word_loop);
if (emit_debug_code()) {
And(scratch, src, kPointerSize - 1);
- Assert(eq, kExpectingAlignmentForCopyBytes,
+ Assert(eq, "Expecting alignment for CopyBytes",
scratch, Operand(zero_reg));
}
Branch(&byte_loop, lt, length, Operand(kPointerSize));
@@ -4055,7 +4029,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
sw(s0, MemOperand(s3, kNextOffset));
if (emit_debug_code()) {
lw(a1, MemOperand(s3, kLevelOffset));
- Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+ Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
}
Subu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
@@ -4409,10 +4383,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
// -----------------------------------------------------------------------------
// Debugging.
-void MacroAssembler::Assert(Condition cc, BailoutReason reason,
+void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
if (emit_debug_code())
- Check(cc, reason, rs, rt);
+ Check(cc, msg, rs, rt);
}
@@ -4420,7 +4394,7 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
if (emit_debug_code()) {
LoadRoot(at, index);
- Check(eq, kRegisterDidNotMatchExpectedRoot, reg, Operand(at));
+ Check(eq, "Register did not match expected root", reg, Operand(at));
}
}
@@ -4437,24 +4411,24 @@ void MacroAssembler::AssertFastElements(Register elements) {
Branch(&ok, eq, elements, Operand(at));
LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
- Abort(kJSObjectWithFastElementsMapHasSlowElements);
+ Abort("JSObject with fast elements map has slow elements");
bind(&ok);
pop(elements);
}
}
-void MacroAssembler::Check(Condition cc, BailoutReason reason,
+void MacroAssembler::Check(Condition cc, const char* msg,
Register rs, Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
- Abort(reason);
+ Abort(msg);
// Will not return here.
bind(&L);
}
-void MacroAssembler::Abort(BailoutReason reason) {
+void MacroAssembler::Abort(const char* msg) {
Label abort_start;
bind(&abort_start);
// We want to pass the msg string like a smi to avoid GC
@@ -4462,7 +4436,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
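
With kSmiTag == 0 and kSmiTagMask == 1, the comment above boils down to:
clear the low bit of the message pointer so the value parses as a smi, and
pass the dropped bit alongside it so the pointer can be reassembled. A
standalone model of the arithmetic:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;

void SplitMessagePointer(const char* msg, intptr_t* p0, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // aligned, smi-safe pointer
  *delta = p1 - *p0;                    // 0 or 1, smi-encoded by the caller
  assert(p1 == *p0 + *delta);           // original pointer is recoverable
}
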
@@ -4606,7 +4579,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
Branch(&ok);
bind(&fail);
- Abort(kGlobalFunctionsMustHaveInitialMap);
+ Abort("Global functions must have initial map");
bind(&ok);
}
}
@@ -4889,7 +4862,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(ne, kOperandIsASmi, at, Operand(zero_reg));
+ Check(ne, "Operand is a smi", at, Operand(zero_reg));
}
}
@@ -4898,7 +4871,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
andi(at, object, kSmiTagMask);
- Check(eq, kOperandIsASmi, at, Operand(zero_reg));
+ Check(eq, "Operand is a smi", at, Operand(zero_reg));
}
}
@@ -4907,11 +4880,11 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
And(t0, object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
+ Check(ne, "Operand is a smi and not a string", t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
+ Check(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
pop(object);
}
}
@@ -4921,11 +4894,11 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
And(t0, object, Operand(kSmiTagMask));
- Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
+ Check(ne, "Operand is a smi and not a name", t0, Operand(zero_reg));
push(object);
lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
+ Check(le, "Operand is not a name", object, Operand(LAST_NAME_TYPE));
pop(object);
}
}
@@ -4933,11 +4906,11 @@ void MacroAssembler::AssertName(Register object) {
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- BailoutReason reason) {
+ const char* message) {
if (emit_debug_code()) {
ASSERT(!src.is(at));
LoadRoot(at, root_value_index);
- Check(eq, reason, src, Operand(at));
+ Check(eq, message, src, Operand(at));
}
}
@@ -5154,7 +5127,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// At this point scratch is a lui(at, ...) instruction.
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeALui,
+ Check(eq, "The instruction to patch should be a lui.",
scratch, Operand(LUI));
lw(scratch, MemOperand(li_location));
}
@@ -5166,7 +5139,7 @@ void MacroAssembler::PatchRelocatedValue(Register li_location,
// scratch is now ori(at, ...).
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeAnOri,
+ Check(eq, "The instruction to patch should be an ori.",
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
@@ -5183,7 +5156,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(value, MemOperand(li_location));
if (emit_debug_code()) {
And(value, value, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeALui,
+ Check(eq, "The instruction should be a lui.",
value, Operand(LUI));
lw(value, MemOperand(li_location));
}
@@ -5194,7 +5167,7 @@ void MacroAssembler::GetRelocatedValue(Register li_location,
lw(scratch, MemOperand(li_location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeAnOri,
+ Check(eq, "The instruction should be an ori.",
scratch, Operand(ORI));
lw(scratch, MemOperand(li_location, kInstrSize));
}
diff --git a/chromium/v8/src/mips/macro-assembler-mips.h b/chromium/v8/src/mips/macro-assembler-mips.h
index 61a0c3a228f..bc3e7c48b4a 100644
--- a/chromium/v8/src/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/mips/macro-assembler-mips.h
@@ -51,6 +51,20 @@ class JumpTarget;
// MIPS generated code calls C code, it must be via t9 register.
+// Flags used for the Allocate functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated memory already tagged as a heap
+  // object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+  // The requested size of the space to allocate is specified in words
+  // instead of bytes.
+ SIZE_IN_WORDS = 1 << 2
+};
+
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
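
The AllocationFlags bits are OR-ed together at call sites; since operator| on
enum values yields int, the combination has to be cast back. A minimal
standalone sketch of combining and testing the flags (the enum mirrors the
declaration above; the call-site shape is illustrative):

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,
  RESULT_CONTAINS_TOP = 1 << 1,
  SIZE_IN_WORDS = 1 << 2
};

int main() {
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS);
  bool tagged = (flags & TAG_OBJECT) != 0;       // result will carry a tag
  bool in_words = (flags & SIZE_IN_WORDS) != 0;  // size needs shift to bytes
  return tagged && in_words ? 0 : 1;
}
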
@@ -613,11 +627,11 @@ class MacroAssembler: public Assembler {
void MultiPushFPU(RegList regs);
void MultiPushReversedFPU(RegList regs);
+ // Lower case push() for compatibility with arch-independent code.
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
- void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<Object> handle);
@@ -662,11 +676,11 @@ class MacroAssembler: public Assembler {
void MultiPopFPU(RegList regs);
void MultiPopReversedFPU(RegList regs);
+ // Lower case pop() for compatibility with arch-independent code.
void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
- void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
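
push() and pop() implement the usual downward-growing stack with
kPointerSize == 4 on MIPS32: push pre-decrements sp then stores, pop loads
then post-increments. A standalone model of the pair:

#include <cstdint>

void push(uint32_t** sp, uint32_t src) {
  *sp -= 1;             // Addu(sp, sp, Operand(-kPointerSize))
  **sp = src;           // sw(src, MemOperand(sp, 0))
}

uint32_t pop(uint32_t** sp) {
  uint32_t dst = **sp;  // lw(dst, MemOperand(sp, 0))
  *sp += 1;             // Addu(sp, sp, Operand(kPointerSize))
  return dst;
}
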
@@ -1272,15 +1286,15 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Assert(Condition cc, const char* msg, Register rs, Operand rt);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+ void Check(Condition cc, const char* msg, Register rs, Operand rt);
// Print a message to stdout and abort execution.
- void Abort(BailoutReason msg);
+ void Abort(const char* msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
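
The three routines layer cleanly: Assert is a Check that only fires when
--debug-code is enabled, and Check branches around an unconditional Abort. A
standalone model of that layering (fprintf/exit stand in for the real
smi-tagged abort sequence):

#include <cstdio>
#include <cstdlib>

bool emit_debug_code = false;  // stands in for FLAG_debug_code

void Abort(const char* msg) {
  std::fprintf(stderr, "abort: %s\n", msg);
  std::exit(1);
}

void Check(bool condition, const char* msg) {
  if (!condition) Abort(msg);  // "Will not return here."
}

void Assert(bool condition, const char* msg) {
  if (emit_debug_code) Check(condition, msg);
}
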
@@ -1364,7 +1378,7 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
- BailoutReason reason);
+ const char* message);
// ---------------------------------------------------------------------------
// HeapNumber utilities.