Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h              36
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc                 12
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc                1151
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc              1060
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h                246
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc                    8
-rw-r--r--  deps/v8/src/mips/codegen-mips.h                    15
-rw-r--r--  deps/v8/src/mips/debug-mips.cc                     83
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc                3
-rw-r--r--  deps/v8/src/mips/frames-mips.h                     14
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc             252
-rw-r--r--  deps/v8/src/mips/ic-mips.cc                       180
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc         1033
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h           418
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc    26
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc               410
16 files changed, 3433 insertions, 1514 deletions
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index c4c4fd259..553c511c3 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -78,7 +78,6 @@ bool Operand::is_reg() const {
}
-
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -120,6 +119,11 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -149,6 +153,10 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (host() != NULL && target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -180,6 +188,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ if (host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -200,6 +214,11 @@ void RelocInfo::set_call_address(Address target) {
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -242,12 +261,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object** p = target_object_address();
- Object* orig = *p;
- visitor->VisitPointer(p);
- if (*p != orig) {
- set_target_object(*p);
- }
+ visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -257,9 +271,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
@@ -273,7 +287,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
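
The hunks above all follow one pattern: each RelocInfo setter now notifies the incremental marker (RecordWrite / RecordWriteIntoCode) after patching a pointer into the host Code object, so a pointer stored into already-scanned code is not missed by the collector. The standalone C++ sketch below illustrates that write-barrier idea only; HeapObject, IncrementalMarking, RecordWrite and StoreWithBarrier here are simplified stand-ins, not the real V8 classes used in the diff.

// Minimal, self-contained sketch of an incremental-marking write barrier.
// All names are simplified stand-ins for illustration, not V8's API.
#include <cstdio>
#include <vector>

struct HeapObject {
  bool marked = false;                 // Already visited by the marker?
  std::vector<HeapObject*> slots;      // Outgoing pointers.
};

class IncrementalMarking {
 public:
  // Called on every pointer store performed while marking is in progress.
  // If the host was already scanned, the new target must be re-queued,
  // otherwise the marker would never see it and could free a live object.
  void RecordWrite(HeapObject* host, HeapObject* target) {
    if (host->marked && !target->marked) {
      target->marked = true;
      worklist_.push_back(target);
    }
  }

  // Drain the worklist, marking everything reachable from queued objects.
  void Finish() {
    while (!worklist_.empty()) {
      HeapObject* obj = worklist_.back();
      worklist_.pop_back();
      for (HeapObject* slot : obj->slots) RecordWrite(obj, slot);
    }
  }

 private:
  std::vector<HeapObject*> worklist_;
};

// A store routed through the barrier, mirroring how the patched RelocInfo
// setters call RecordWrite/RecordWriteIntoCode right after patching.
void StoreWithBarrier(IncrementalMarking* marking,
                      HeapObject* host, HeapObject* target) {
  host->slots.push_back(target);       // The actual write (code patching).
  marking->RecordWrite(host, target);  // Tell the marker about it.
}

int main() {
  IncrementalMarking marking;
  HeapObject code;      // Plays the role of the host Code object.
  HeapObject embedded;  // Plays the role of the newly embedded heap object.
  code.marked = true;   // The marker has already scanned the code object.
  StoreWithBarrier(&marking, &code, &embedded);
  marking.Finish();
  std::printf("embedded object marked: %s\n", embedded.marked ? "yes" : "no");
  return 0;
}
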
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e01a0ca70..e933181d4 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -74,7 +74,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+ CpuFeaturesImpliedByCompiler());
+ ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -82,8 +84,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
+ supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2018,7 +2019,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2041,7 +2043,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index d77230448..5609d5ee4 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -587,10 +587,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- __ EnterInternalFrame();
- __ push(v0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
__ pop(function);
__ mov(argument, v0);
__ Branch(&argument_is_string);
@@ -606,10 +607,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ Ret();
}
@@ -622,13 +624,13 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that the function is not a smi.
__ And(t0, a1, Operand(kSmiTagMask));
__ Branch(&non_function_call, eq, t0, Operand(zero_reg));
// Check that the function is a JSFunction.
__ GetObjectType(a1, a2, a2);
- __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
// Jump to the function-specific construct stub.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -638,13 +640,21 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// a0: number of arguments
// a1: called object
+ // a2: object type
+ Label do_call;
+ __ bind(&slow);
+ __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is
// stack element argc.
// Set expected number of arguments to zero (not changing a0).
__ mov(a2, zero_reg);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -667,331 +677,336 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
// Enter a construct frame.
- __ EnterConstructFrame();
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Preserve the two incoming parameters on the stack.
- __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
- __ MultiPushReversed(a0.bit() | a1.bit());
+ // Preserve the two incoming parameters on the stack.
+ __ sll(a0, a0, kSmiTagSize); // Tag arguments count.
+ __ MultiPushReversed(a0.bit() | a1.bit());
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ // Use t7 to hold undefined, which is used in several places below.
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- Label rt_call, allocated;
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- if (FLAG_inline_new) {
- Label undo_allocation;
+ Label rt_call, allocated;
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ li(a2, Operand(debug_step_in_fp));
+ __ lw(a2, MemOperand(a2));
+ __ Branch(&rt_call, ne, a2, Operand(zero_reg));
#endif
- // Load the initial map and verify that it is in fact a map.
- // a1: constructor function
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
- __ Branch(&rt_call, eq, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
- __ lbu(t0, constructor_count);
- __ Subu(t0, t0, Operand(1));
- __ sb(t0, constructor_count);
- __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
- __ Push(a1, a2);
-
- __ push(a1); // Constructor.
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(a2);
- __ pop(a1);
-
- __ bind(&allocate);
- }
+ // Load the initial map and verify that it is in fact a map.
+ // a1: constructor function
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+ __ lbu(t0, constructor_count);
+ __ Subu(t0, t0, Operand(1));
+ __ sb(t0, constructor_count);
+ __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+ __ Push(a1, a2);
+
+ __ push(a1); // Constructor.
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(a2);
+ __ pop(a1);
+
+ __ bind(&allocate);
+ }
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t6, t4, t0); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t4: JSObject (not tagged)
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ __ Addu(t5, t5, Operand(3*kPointerSize));
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+ // Fill all the in-object properties with appropriate filler.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size (in words)
+ // t4: JSObject (not tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t6, t4, t0); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ addu(a0, t5, t0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, "Unexpected number of pre-allocated property fields.",
+ a0, Operand(t6));
+ }
+ __ InitializeFieldsWithFiller(t5, a0, t7);
// To allow for truncation.
__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
}
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(t5, 0));
- __ addiu(t5, t5, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, Uless, t5, Operand(t6));
- }
-
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
- // a1: constructor function
- // t4: JSObject
- // t5: start of next object (not tagged)
- __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
- __ And(t6,
- a0,
- Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
- __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
- __ Addu(a3, a3, Operand(t0));
- __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
- __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
- __ subu(a3, a3, t0);
-
- // Done if no extra properties are to be allocated.
- __ Branch(&allocated, eq, a3, Operand(zero_reg));
- __ Assert(greater_equal, "Property allocation count failed.",
- a3, Operand(zero_reg));
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // a1: constructor
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: start of next object
- __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- a0,
- t5,
- t6,
- a2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // a1: constructor
- // a3: number of elements in properties array (un-tagged)
- // t4: JSObject
- // t5: start of next object
- __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
- __ mov(a2, t5);
- __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
- __ sll(a0, a3, kSmiTagSize);
- __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(2 * kPointerSize));
-
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
- // Initialize the fields to undefined.
- // a1: constructor
- // a2: First element of FixedArray (not tagged)
- // a3: number of elements in properties array
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ sll(t3, a3, kPointerSizeLog2);
- __ addu(t6, a2, t3); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
- __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ __ InitializeFieldsWithFiller(t5, t6, t7);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not fall through to runtime call if it is.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: start of next object (not tagged)
+ __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+ __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ Addu(a3, a3, Operand(t6));
+ __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ subu(a3, a3, t6);
+
+ // Done if no extra properties are to be allocated.
+ __ Branch(&allocated, eq, a3, Operand(zero_reg));
+ __ Assert(greater_equal, "Property allocation count failed.",
+ a3, Operand(zero_reg));
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // a1: constructor
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: start of next object
+ __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ a0,
+ t5,
+ t6,
+ a2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // a1: constructor
+ // a3: number of elements in properties array (un-tagged)
+ // t4: JSObject
+ // t5: start of next object
+ __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+ __ mov(a2, t5);
+ __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+ __ sll(a0, a3, kSmiTagSize);
+ __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+ __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+ // Initialize the fields to undefined.
+ // a1: constructor
+ // a2: First element of FixedArray (not tagged)
+ // a3: number of elements in properties array
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ sll(t3, a3, kPointerSizeLog2);
+ __ addu(t6, a2, t3); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+ __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sw(t7, MemOperand(a2));
+ __ addiu(a2, a2, kPointerSize);
+ __ bind(&entry);
+ __ Branch(&loop, less, a2, Operand(t6));
}
- __ jmp(&entry);
- __ bind(&loop);
- __ sw(t7, MemOperand(a2));
- __ addiu(a2, a2, kPointerSize);
- __ bind(&entry);
- __ Branch(&loop, less, a2, Operand(t6));
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject.
+ // a1: constructor function
+ // t4: JSObject
+ // t5: FixedArray (not tagged)
+ __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // t4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(t4, t5);
}
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
+ __ bind(&rt_call);
+ // Allocate the new receiver object using the runtime call.
// a1: constructor function
- // t4: JSObject
- // t5: FixedArray (not tagged)
- __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+ __ push(a1); // Argument for Runtime_NewObject.
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(t4, v0);
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // t4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(t4, t5);
- }
+ // Receiver for constructor call allocated.
+ // t4: JSObject
+ __ bind(&allocated);
+ __ push(t4);
- __ bind(&rt_call);
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- __ push(a1); // Argument for Runtime_NewObject.
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(t4, v0);
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, kPointerSize));
+ __ MultiPushReversed(a1.bit() | t4.bit());
- // Receiver for constructor call allocated.
- // t4: JSObject
- __ bind(&allocated);
- __ push(t4);
+ // Reload the number of arguments from the stack.
+ // a1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ lw(a3, MemOperand(sp, 4 * kPointerSize));
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, kPointerSize));
- __ MultiPushReversed(a1.bit() | t4.bit());
+ // Setup pointer to last argument.
+ __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Reload the number of arguments from the stack.
- // a1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+ // Setup number of arguments for function call below.
+ __ srl(a0, a3, kSmiTagSize);
- // Setup pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ // Copy arguments and receiver to the expression stack.
+ // a0: number of arguments
+ // a1: constructor function
+ // a2: address of last argument (caller sp)
+ // a3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, a2, Operand(t0));
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(a3, a3, Operand(-2));
+ __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
- // Setup number of arguments for function call below.
- __ srl(a0, a3, kSmiTagSize);
+ // Call the function.
+ // a0: number of arguments
+ // a1: constructor function
+ if (is_api_function) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ jmp(&entry);
- __ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
- __ lw(t1, MemOperand(t0));
- __ push(t1);
- __ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ // Pop the function from the stack.
+ // v0: result
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ Pop();
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Restore context from the frame.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ And(t0, v0, Operand(kSmiTagMask));
+ __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a3, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
}
- // Pop the function from the stack.
- // v0: result
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ Pop();
-
- // Restore context from the frame.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ And(t0, v0, Operand(kSmiTagMask));
- __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
__ sll(t0, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, t0);
__ Addu(sp, sp, kPointerSize);
@@ -1031,58 +1046,60 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(cp, zero_reg);
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ li(s6, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
+ __ li(s6, Operand(roots_address));
- // Push the function and the receiver onto the stack.
- __ Push(a1, a2);
+ // Push the function and the receiver onto the stack.
+ __ Push(a1, a2);
- // Copy arguments to the stack in a loop.
- // a3: argc
- // s0: argv, ie points to first arg
- Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(&loop, ne, s0, Operand(t2));
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t0);
- __ mov(s2, t0);
- __ mov(s3, t0);
- __ mov(s4, t0);
- __ mov(s5, t0);
- // s6 holds the root address. Do not clobber.
- // s7 is cp. Do not init.
-
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Copy arguments to the stack in a loop.
+ // a3: argc
+ // s0: argv, ie points to first arg
+ Label loop, entry;
+ __ sll(t0, a3, kPointerSizeLog2);
+ __ addu(t2, s0, t0);
+ __ b(&entry);
+ __ nop(); // Branch delay slot nop.
+ // t2 points past last arg.
+ __ bind(&loop);
+ __ lw(t0, MemOperand(s0)); // Read next parameter.
+ __ addiu(s0, s0, kPointerSize);
+ __ lw(t0, MemOperand(t0)); // Dereference handle.
+ __ push(t0); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, s0, Operand(t2));
- __ LeaveInternalFrame();
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ mov(s1, t0);
+ __ mov(s2, t0);
+ __ mov(s3, t0);
+ __ mov(s4, t0);
+ __ mov(s5, t0);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code and pass argc as a0.
+ __ mov(a0, a3);
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+
+ // Leave internal frame.
+ }
__ Jump(ra);
}
@@ -1100,27 +1117,28 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- // Call the runtime function.
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ // Call the runtime function.
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down temporary frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(t9);
@@ -1129,26 +1147,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(a1);
- // Push call kind information.
- __ push(t1);
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
- // Push the function on the stack as the argument to the runtime function.
- __ push(a1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(t1);
- // Restore saved function.
- __ pop(a1);
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down temporary frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(t9);
@@ -1190,19 +1209,20 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack, check
// if it is a function.
// a0: actual number of arguments
- Label non_function;
+ Label slow, non_function;
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
__ And(at, a1, Operand(kSmiTagMask));
__ Branch(&non_function, eq, at, Operand(zero_reg));
__ GetObjectType(a1, a2, a2);
- __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
// 3a. Patch the first argument if necessary when calling a function.
// a0: actual number of arguments
// a1: function
Label shift_arguments;
+ __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1210,13 +1230,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Do not transform the receiver for strict mode functions.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+ __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
// Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+ __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
// Compute the receiver in non-strict mode.
// Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1238,21 +1258,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
-
- __ push(a2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- __ LeaveInternalFrame();
- // Restore the function to a1.
+ // Enter an internal frame in order to preserve argument count.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ push(a0);
+
+ __ push(a2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a2, v0);
+
+ __ pop(a0);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ // Leave internal frame.
+ }
+ // Restore the function to a1, and the flag to t0.
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
+ __ li(t0, Operand(0, RelocInfo::NONE));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1273,25 +1297,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Branch(&shift_arguments);
}
- // 3b. Patch the first argument when calling a non-function. The
+ // 3b. Check for function proxy.
+ __ bind(&slow);
+ __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
+ __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ __ bind(&non_function);
+ __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
+
+ // 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
// receiver, so overwrite the first argument which will ultimately
// become the receiver.
// a0: actual number of arguments
// a1: function
- __ bind(&non_function);
- // Restore the function in case it has been modified.
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
__ sw(a1, MemOperand(a2, -kPointerSize));
- // Clear a1 to indicate a non-function being called.
- __ mov(a1, zero_reg);
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// a0: actual number of arguments
// a1: function
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
__ bind(&shift_arguments);
{ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
@@ -1309,14 +1339,26 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+ // or a function proxy via CALL_FUNCTION_PROXY.
// a0: actual number of arguments
// a1: function
- { Label function;
- __ Branch(&function, ne, a1, Operand(zero_reg));
- __ mov(a2, zero_reg); // expected arguments is 0 for CALL_NON_FUNCTION
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+ { Label function, non_proxy;
+ __ Branch(&function, eq, t0, Operand(zero_reg));
+ // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+ __ mov(a2, zero_reg);
__ SetCallKind(t1, CALL_AS_METHOD);
+ __ Branch(&non_proxy, ne, t0, Operand(1));
+
+ __ push(a1); // Re-add proxy object as additional argument.
+ __ Addu(a0, a0, Operand(1));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&non_proxy);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
@@ -1350,134 +1392,161 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- __ EnterInternalFrame();
-
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
- __ push(a0);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying need to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ subu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison.
-
- // Out of stack space.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ push(a1);
- __ push(v0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(v0); // Limit.
- __ mov(a1, zero_reg); // Initial index.
- __ push(a1);
-
- // Change context eagerly to get the right global object if necessary.
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a0.
- __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- Label call_to_object, use_global_receiver, push_receiver;
- __ lw(a0, MemOperand(fp, kRecvOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
-
- // Compute the receiver in non-strict mode.
- __ And(t0, a0, Operand(kSmiTagMask));
- __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_receiver, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
- __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ lw(a1, MemOperand(fp, kArgsOffset));
- __ push(a1);
- __ push(a0);
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ push(a0);
+ // Returns (in v0) number of arguments to copy to stack as Smi.
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison.
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ push(a1);
+ __ push(v0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(v0); // Limit.
+ __ mov(a1, zero_reg); // Initial index.
+ __ push(a1);
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
+ // Get the receiver.
+ __ lw(a0, MemOperand(fp, kRecvOffset));
- // Use inline caching to access the arguments.
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Addu(a0, a0, Operand(1 << kSmiTagSize));
- __ sw(a0, MemOperand(fp, kIndexOffset));
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
- // Invoke the function.
- ParameterCount actual(a0);
- __ sra(a0, a0, kSmiTagSize);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ InvokeFunction(a1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ // Change context eagerly to get the right global object if necessary.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in a1.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+
+ // Do not transform the receiver for native (Compilerhints already in a2).
+ __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+
+ // Compute the receiver in non-strict mode.
+ __ And(t3, a0, Operand(kSmiTagMask));
+ __ Branch(&call_to_object, eq, t3, Operand(zero_reg));
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+ // Check if the receiver is already a JavaScript object.
+ // a0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+ // Convert the receiver to a regular object.
+ // a0: receiver
+ __ bind(&call_to_object);
+ __ push(a0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
+ __ Branch(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // a0: receiver
+ __ bind(&push_receiver);
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, kArgsOffset));
+ __ push(a1);
+ __ push(a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ lw(a0, MemOperand(fp, kIndexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, kLimitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(a0);
+ __ sra(a0, a0, kSmiTagSize);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ scope.GenerateLeaveFrame();
+
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(a1); // Add function proxy as last argument.
+ __ Addu(a0, a0, Operand(1));
+ __ li(a2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+ // Tear down the internal frame and remove function, receiver and args.
+ }
+
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
}
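
The builtins changes above repeatedly replace paired __ EnterInternalFrame() / __ LeaveInternalFrame() calls with a block-scoped FrameScope, so the frame is torn down automatically when the block ends. The standalone C++ sketch below shows only the underlying RAII idea with toy stand-ins; the real FrameScope takes a StackFrame type and does additional bookkeeping that this sketch omits.

// Toy RAII frame scope; MacroAssembler and FrameScope here are stand-ins,
// not the V8 classes from the diff above.
#include <cassert>
#include <cstdio>

class MacroAssembler {
 public:
  void EnterInternalFrame() { ++depth_; std::puts("enter frame"); }
  void LeaveInternalFrame() { assert(depth_ > 0); --depth_; std::puts("leave frame"); }
  int depth() const { return depth_; }
 private:
  int depth_ = 0;
};

class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }
  // Non-copyable: exactly one Leave per Enter, on every code path.
  FrameScope(const FrameScope&) = delete;
  FrameScope& operator=(const FrameScope&) = delete;
 private:
  MacroAssembler* masm_;
};

void GenerateSomething(MacroAssembler* masm) {
  {
    FrameScope scope(masm);               // Replaces __ EnterInternalFrame();
    std::puts("emit code inside the frame");
  }                                       // Destructor replaces __ LeaveInternalFrame();
}

int main() {
  MacroAssembler masm;
  GenerateSomething(&masm);
  std::printf("frame depth after scope: %d\n", masm.depth());
  return 0;
}
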
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 521b8e58f..fe251b9e6 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -190,6 +190,71 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ v0, a1, a2, &gc, TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ lw(a3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ li(a2, Operand(Smi::FromInt(length)));
+ __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+ // If this block context is nested in the global context we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(a3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ Assert(eq, message, a3, Operand(zero_reg));
+ }
+ __ lw(a3, GlobalObjectOperand());
+ __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+ __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Setup the fixed slots.
+ __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
+ __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
+ __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, v0);
+ __ Addu(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
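In other words, the stub carves a FixedArray-shaped object out of new space and fills it in as a block context: map and Smi-tagged length first, then the closure/previous/extension/global slots, then holes. A loose host-side model of that layout, not part of this patch (32-bit tagged words, a Smi tag size of 1, and four fixed context slots are assumptions made for illustration):

#include <cstdint>
#include <cstdio>
#include <vector>

// Symbolic stand-ins for the header words and Context slot indices used above.
enum Slot { kMap, kLength, kClosure, kPrevious, kExtension, kGlobal, kFirstExtraSlot };

std::vector<uint32_t> BuildBlockContext(int extra_slots, uint32_t map,
                                        uint32_t closure, uint32_t previous,
                                        uint32_t scope_info, uint32_t global,
                                        uint32_t the_hole) {
  const int kMinContextSlots = 4;                    // stands in for Context::MIN_CONTEXT_SLOTS
  int length = kMinContextSlots + extra_slots;       // slots_ + Context::MIN_CONTEXT_SLOTS
  std::vector<uint32_t> ctx(2 + length, the_hole);   // FixedArray::SizeFor(length) words
  ctx[kMap] = map;                                   // block context map root
  ctx[kLength] = static_cast<uint32_t>(length) << 1; // Smi::FromInt(length)
  ctx[kClosure] = closure;                           // function (or looked-up sentinel closure)
  ctx[kPrevious] = previous;                         // the incoming cp
  ctx[kExtension] = scope_info;                      // serialized scope info from the stack
  ctx[kGlobal] = global;                             // copied from the previous context
  return ctx;                                        // remaining slots stay the hole value
}

int main() {
  std::vector<uint32_t> ctx = BuildBlockContext(2, 0xA0, 0xB0, 0xC0, 0xD0, 0xE0, 0x1);
  std::printf("words=%zu length-smi=%u\n", ctx.size(), ctx[kLength]);
  return 0;
}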
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
// [sp]: constant elements.
@@ -615,7 +680,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
- FPURegister double_dst,
+ DoubleRegister double_dst,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -651,25 +716,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(single_scratch, double_dst);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_dst,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
__ Move(dst1, dst2, double_dst);
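The EmitFPUTruncate helper above replaces the hand-rolled FCSR save/truncate/restore sequence: it truncates toward zero and reports, via except_flag, whether the conversion was inexact or out of range. A minimal host-side C++ sketch of that check, not part of this patch (the <cfenv> machinery stands in for the FCSR flag handling, and the function name is illustrative):

#include <cfenv>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Truncate 'input' toward zero and report whether the conversion was exact,
// i.e. the analogue of EmitFPUTruncate leaving except_flag == 0.
static bool TruncateDoubleToInt32(double input, int32_t* out) {
  std::fesetround(FE_TOWARDZERO);
  std::feclearexcept(FE_ALL_EXCEPT);
  double rounded = std::rint(input);               // raises FE_INEXACT if it had to round
  bool inexact = std::fetestexcept(FE_INEXACT) != 0;
  bool in_range = rounded >= -2147483648.0 && rounded <= 2147483647.0;
  *out = in_range ? static_cast<int32_t>(rounded) : 0;
  return in_range && !inexact;                     // false -> take the not_int32 path
}

int main() {
  int32_t v;
  std::printf("3.0  ok=%d v=%d\n", TruncateDoubleToInt32(3.0, &v), v);
  std::printf("3.7  ok=%d v=%d\n", TruncateDoubleToInt32(3.7, &v), v);
  std::printf("1e99 ok=%d v=%d\n", TruncateDoubleToInt32(1e99, &v), v);
  return 0;
}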
@@ -706,7 +762,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch,
+ DoubleRegister double_scratch,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +791,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Load the double value.
__ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(double_scratch, double_scratch);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ FPURegister single_scratch = double_scratch.low();
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ double_scratch,
+ scratch1,
+ except_flag,
+ kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
// Get the result in the destination register.
- __ mfc1(dst, double_scratch);
+ __ mfc1(dst, single_scratch);
} else {
// Load the double value in the destination registers.
@@ -881,9 +929,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 4);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
CpuFeatures::Scope scope(FPU);
@@ -901,6 +951,35 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(a1) &&
+ the_heap_number_.is(v0) &&
+ scratch_.is(a2) &&
+ sign_.is(a3)) {
+ return true;
+ }
+ if (the_int_.is(a2) &&
+ the_heap_number_.is(v0) &&
+ scratch_.is(a3) &&
+ sign_.is(a0)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
+ WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1258,7 +1337,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
if (!CpuFeatures::IsSupported(FPU)) {
__ push(ra);
- __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments.
+ __ PrepareCallCFunction(0, 2, t4);
if (!IsMipsSoftFloatABI) {
// We are not using MIPS FPU instructions, and parameters for the runtime
// function call are prepared in a0-a3 registers, but function we are
@@ -1268,19 +1347,17 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
__ Move(f12, a0, a1);
__ Move(f14, a2, a3);
}
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+ 0, 2);
__ pop(ra); // Because this function returns int, result is in v0.
__ Ret();
} else {
CpuFeatures::Scope scope(FPU);
Label equal, less_than;
- __ c(EQ, D, f12, f14);
- __ bc1t(&equal);
- __ nop();
-
- __ c(OLT, D, f12, f14);
- __ bc1t(&less_than);
- __ nop();
+ __ BranchF(&equal, NULL, eq, f12, f14);
+ __ BranchF(&less_than, NULL, lt, f12, f14);
// Not equal, not less, not NaN, must be greater.
__ li(v0, Operand(GREATER));
@@ -1303,7 +1380,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1473,9 +1550,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ JumpIfSmi(probe, not_found);
__ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
__ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ c(EQ, D, f12, f14);
- __ bc1t(&load_result_from_cache);
- __ nop(); // bc1t() requires explicit fill of branch delay slot.
+ __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
__ Branch(not_found);
} else {
// Note that there is no cache check for non-FPU case, even though
@@ -1591,9 +1666,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ li(t2, Operand(EQUAL));
// Check if either rhs or lhs is NaN.
- __ c(UN, D, f12, f14);
- __ bc1t(&nan);
- __ nop();
+ __ BranchF(NULL, &nan, eq, f12, f14);
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
@@ -1711,88 +1784,144 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
CpuFeatures::Scope scope(FPU);
- Label false_result;
- Label not_heap_number;
- Register scratch0 = t5.is(tos_) ? t3 : t5;
-
- // undefined -> false
- __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
- // Boolean -> its value
- __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
- __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
- // "tos_" is a register and contains a non-zero value. Hence we implicitly
- // return true if the equal condition is satisfied.
- __ Ret(eq, tos_, Operand(scratch0));
-
- // Smis: 0 -> false, all other -> true
- __ And(scratch0, tos_, tos_);
- __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
- __ And(scratch0, tos_, Operand(kSmiTagMask));
- // "tos_" is a register and contains a non-zero value. Hence we implicitly
- // return true if the not equal condition is satisfied.
- __ Ret(eq, scratch0, Operand(zero_reg));
-
- // 'null' -> false
- __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
- __ Branch(&false_result, eq, tos_, Operand(scratch0));
-
- // HeapNumber => false if +0, -0, or NaN.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&not_heap_number, ne, scratch0, Operand(at));
-
- __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ fcmp(f12, 0.0, UEQ);
-
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ movt(tos_, zero_reg);
- __ Ret();
+ Label patch;
+ const Register map = t5.is(tos_) ? t3 : t5;
- __ bind(&not_heap_number);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
- __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
- __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
-
- // JavaScript object => true.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Check for string.
- __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
-
- // String value => false iff empty, i.e., length is zero.
- __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
- __ Ret();
+ // undefined -> false.
+ CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
+
+ // Boolean -> its value.
+ CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+ CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
+
+ // 'null' -> false.
+ CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
- // Return 0 in "tos_" for false.
- __ bind(&false_result);
- __ mov(tos_, zero_reg);
+ if (types_.Contains(SMI)) {
+ // Smis: 0 -> false, all other -> true
+ __ And(at, tos_, kSmiTagMask);
+ // tos_ contains the correct return value already
+ __ Ret(eq, at, Operand(zero_reg));
+ } else if (types_.NeedsMap()) {
+ // If we need a map later and have a Smi -> patch.
+ __ JumpIfSmi(tos_, &patch);
+ }
+
+ if (types_.NeedsMap()) {
+ __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ if (types_.CanBeUndetectable()) {
+ __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ And(at, at, Operand(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ __ movn(tos_, zero_reg, at);
+ __ Ret(ne, at, Operand(zero_reg));
+ }
+ }
+
+ if (types_.Contains(SPEC_OBJECT)) {
+ // Spec object -> true.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ // tos_ contains the correct non-zero return value already.
+ __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ }
+
+ if (types_.Contains(STRING)) {
+ // String value -> false iff empty.
+ __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Label skip;
+ __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ __ Ret(); // the string length is OK as the return value
+ __ bind(&skip);
+ }
+
+ if (types_.Contains(HEAP_NUMBER)) {
+ // Heap number -> false iff +0, -0, or NaN.
+ Label not_heap_number;
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&not_heap_number, ne, map, Operand(at));
+ Label zero_or_nan, number;
+ __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ bind(&zero_or_nan);
+ __ mov(tos_, zero_reg);
+ __ bind(&number);
+ __ Ret();
+ __ bind(&not_heap_number);
+ }
+
+ __ bind(&patch);
+ GenerateTypeTransition(masm);
+}
+
+
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+ Type type,
+ Heap::RootListIndex value,
+ bool result) {
+ if (types_.Contains(type)) {
+ // If we see an expected oddball, return its ToBoolean value in tos_.
+ __ LoadRoot(at, value);
+ __ Subu(at, at, tos_); // This is a check for equality for the movz below.
+ // The value of a root is never NULL, so we can avoid loading a non-null
+ // value into tos_ when we want to return 'true'.
+ if (!result) {
+ __ movz(tos_, zero_reg, at);
+ }
+ __ Ret(eq, at, Operand(zero_reg));
+ }
+}
+
+
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ Move(a3, tos_);
+ __ li(a2, Operand(Smi::FromInt(tos_.code())));
+ __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+ __ Push(a3, a2, a1);
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+ 3,
+ 1);
+}
+
+
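The rewritten ToBooleanStub is driven by a recorded set of input types (types_): each Contains() test above only emits a check for the kinds of value the call site has actually seen, and anything else falls through to the patch label, where GenerateTypeTransition asks the runtime to patch the caller to a more general stub. A rough stand-alone sketch of such a type set (the bit layout and helper names here are illustrative, not the engine's actual encoding):

#include <cstdint>
#include <cstdio>

enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING, HEAP_NUMBER };

class Types {
 public:
  void Add(Type t) { bits_ |= 1u << t; }
  bool Contains(Type t) const { return (bits_ >> t) & 1u; }
  // A map load is only needed when some heap-object case has to be distinguished.
  bool NeedsMap() const {
    return Contains(SPEC_OBJECT) || Contains(STRING) || Contains(HEAP_NUMBER);
  }
  uint8_t ToByte() const { return static_cast<uint8_t>(bits_); }  // what the transition pushes

 private:
  uint32_t bits_ = 0;
};

int main() {
  Types seen;
  seen.Add(SMI);
  seen.Add(HEAP_NUMBER);
  std::printf("byte=0x%02x needs_map=%d has_string=%d\n",
              seen.ToByte(), seen.NeedsMap(), seen.Contains(STRING));
  return 0;
}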
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ MultiPush(kJSCallerSaved | ra.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ __ MultiPushFPU(kCallerSavedFPU);
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = a1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ li(a0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ __ MultiPopFPU(kCallerSavedFPU);
+ }
+
+ __ MultiPop(kJSCallerSaved | ra.bit());
__ Ret();
}
@@ -1951,12 +2080,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(a0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a1, v0);
- __ pop(a0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a1, v0);
+ __ pop(a0);
+ }
__ bind(&heapnumber_allocated);
__ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -1998,13 +2128,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(v0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(a2, v0); // Move the new heap number into a2.
- // Get the heap number into v0, now that the new heap number is in a2.
- __ pop(v0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(v0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
+ }
// Convert the heap number in v0 to an untagged integer in a1.
// This can't go slow-case because it's the same number we already
@@ -2115,6 +2246,9 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -2717,26 +2851,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Otherwise return a heap number if allowed, or jump to type
// transition.
- // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
- // On MIPS a lot of things cannot be implemented the same way so right
- // now it makes a lot more sense to just do things manually.
-
- // Save FCSR.
- __ cfc1(scratch1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
- __ trunc_w_d(single_scratch, f10);
- // Retrieve FCSR.
- __ cfc1(scratch2, FCSR);
- // Restore FCSR.
- __ ctc1(scratch1, FCSR);
-
- // Check for inexact conversion or exception.
- __ And(scratch2, scratch2, kFCSRFlagMask);
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ single_scratch,
+ f10,
+ scratch1,
+ except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
- // If scratch2 != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+ // If except_flag != 0, result does not fit in a 32-bit integer.
+ __ Branch(&transition, ne, except_flag, Operand(zero_reg));
}
// Check if the result fits in a smi.
@@ -2929,9 +3053,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ Ret();
} else {
// Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a1 as scratch. v0 is preserved and returned.
+ // a3 and a0 as scratch. v0 is preserved and returned.
__ mov(a0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+ WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
__ TailCallStub(&stub);
}
@@ -3225,7 +3349,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lw(t0, MemOperand(cache_entry, 0));
__ lw(t1, MemOperand(cache_entry, 4));
__ lw(t2, MemOperand(cache_entry, 8));
- __ Addu(cache_entry, cache_entry, 12);
__ Branch(&calculate, ne, a2, Operand(t0));
__ Branch(&calculate, ne, a3, Operand(t1));
// Cache hit. Load result, cleanup and return.
@@ -3259,13 +3382,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Register a0 holds precalculated cache entry address; preserve
// it on the stack and pop it into register cache_entry after the
// call.
- __ push(cache_entry);
+ __ Push(cache_entry, a2, a3);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(f4);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
- __ pop(cache_entry);
+ __ Pop(cache_entry, a2, a3);
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
__ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3283,10 +3406,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
__ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(a0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3299,14 +3423,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in f4 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ li(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ li(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3317,22 +3442,26 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
__ push(ra);
__ PrepareCallCFunction(2, scratch);
if (IsMipsSoftFloatABI) {
- __ Move(v0, v1, f4);
+ __ Move(a0, a1, f4);
} else {
__ mov_d(f12, f4);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(
- ExternalReference::math_sin_double_function(masm->isolate()), 2);
+ ExternalReference::math_sin_double_function(masm->isolate()),
+ 0, 1);
break;
case TranscendentalCache::COS:
__ CallCFunction(
- ExternalReference::math_cos_double_function(masm->isolate()), 2);
+ ExternalReference::math_cos_double_function(masm->isolate()),
+ 0, 1);
break;
case TranscendentalCache::LOG:
__ CallCFunction(
- ExternalReference::math_log_double_function(masm->isolate()), 2);
+ ExternalReference::math_log_double_function(masm->isolate()),
+ 0, 1);
break;
default:
UNIMPLEMENTED();
@@ -3415,12 +3544,15 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(3, scratch);
+ __ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 3);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3443,15 +3575,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
heapnumbermap,
&call_runtime);
__ push(ra);
- __ PrepareCallCFunction(4, scratch);
+ __ PrepareCallCFunction(0, 2, scratch);
// ABI (o32) for func(double a, double b): a in f12, b in f14.
ASSERT(double_base.is(f12));
ASSERT(double_exponent.is(f14));
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0,
+ 2);
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(v0, heapnumber);
@@ -3468,6 +3605,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(v0);
}
@@ -3490,16 +3658,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// s1: pointer to the first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
+ Isolate* isolate = masm->isolate();
+
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, a1);
- __ CallCFunction(
- ExternalReference::perform_gc_function(masm->isolate()), 1);
+ __ PrepareCallCFunction(1, 0, a1);
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
}
ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+ ExternalReference::heap_always_allocate_scope_depth(isolate);
if (always_allocate) {
__ li(a0, Operand(scope_depth));
__ lw(a1, MemOperand(a0));
@@ -3588,18 +3757,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
// Retrieve the pending exception and clear the variable.
- __ li(t0,
- Operand(ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(a3, MemOperand(t0));
+ __ li(a3, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ lw(v0, MemOperand(t0));
__ sw(a3, MemOperand(t0));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
__ Branch(throw_termination_exception, eq,
- v0, Operand(masm->isolate()->factory()->termination_exception()));
+ v0, Operand(isolate->factory()->termination_exception()));
// Handle normal exception.
__ jmp(throw_normal_exception);
@@ -3628,6 +3795,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3681,6 +3849,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
+ Isolate* isolate = masm->isolate();
// Registers:
// a0: entry address
@@ -3699,8 +3868,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
CpuFeatures::Scope scope(FPU);
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
}
+
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(FPU)) {
@@ -3715,7 +3887,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- masm->isolate())));
+ isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
// Setup frame pointer for the frame to be pushed.
@@ -3739,8 +3911,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
- masm->isolate());
+ ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -3763,7 +3934,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Coming in here the fp will be invalid because the PushTryHandler below
// sets it to 0 to signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit); // b exposes branch delay slot.
@@ -3778,11 +3949,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ li(t0,
- Operand(ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(t1, MemOperand(t0));
+ __ li(t1, Operand(isolate->factory()->the_hole_value()));
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ sw(t1, MemOperand(t0));
// Invoke the function by calling through JS entry trampoline builtin.
@@ -3805,7 +3974,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
if (is_construct) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
+ isolate);
__ li(t0, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -3833,7 +4002,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptors from the stack.
__ pop(t1);
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
- masm->isolate())));
+ isolate)));
__ sw(t1, MemOperand(t0));
// Reset the stack to the callee saved registers.
@@ -3857,11 +4026,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * object: a0 or at sp + 1 * kPointerSize.
// * function: a1 or at sp.
//
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
void InstanceofStub::Generate(MacroAssembler* masm) {
- // This is a crankshaft-specific feature that has not been implemented yet.
- ASSERT(!HasCallSiteInlineCheck());
// Call site inlining and patching implies arguments in registers.
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +4043,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register inline_site = t5;
const Register scratch = a2;
+ const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
@@ -3890,10 +4060,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(t1));
- __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(t1));
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&miss, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&miss, ne, map, Operand(at));
__ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -3913,7 +4083,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in t0 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+ __ LoadFromSafepointRegisterSlot(scratch, t0);
+ __ Subu(inline_site, ra, scratch);
+ // Patch the relocated value to map.
+ __ PatchRelocatedValue(inline_site, scratch, map);
}
// Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +4117,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ // Patch the call site to return true.
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ __ mov(v0, zero_reg);
+ }
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
@@ -3948,8 +4135,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
} else {
- UNIMPLEMENTED_MIPS();
+ // Patch the call site to return false.
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ PatchRelocatedValue(inline_site, scratch, v0);
+
+ if (!ReturnTrueFalseObject()) {
+ __ li(v0, Operand(Smi::FromInt(1)));
+ }
}
+
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
@@ -3986,10 +4182,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ mov(a0, v0);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4411,10 +4608,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4427,6 +4620,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 2 * kPointerSize;
static const int kJSRegExpOffset = 3 * kPointerSize;
+ Isolate* isolate = masm->isolate();
+
Label runtime, invoke_regexp;
// Allocation of registers for this function. These are in callee save
@@ -4442,9 +4637,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
+ isolate);
ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+ ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4525,7 +4720,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ Branch(&runtime, ne, a0, Operand(
- masm->isolate()->factory()->fixed_array_map()));
+ isolate->factory()->fixed_array_map()));
// Check that the last match info has space for the capture registers and the
// additional information.
__ lw(a0,
@@ -4616,7 +4811,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+ __ IncrementCounter(isolate->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
@@ -4656,13 +4851,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+ ExternalReference::address_of_static_offsets_vector(isolate)));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data
// and calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4715,11 +4909,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ li(a1, Operand(
- ExternalReference::the_hole_value_location(masm->isolate())));
- __ lw(a1, MemOperand(a1, 0));
+ __ li(a1, Operand(isolate->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- masm->isolate())));
+ isolate)));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
@@ -4737,7 +4929,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure and exception return null.
- __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+ __ li(v0, Operand(isolate->factory()->null_value()));
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4757,20 +4949,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(a3, last_match_info_elements); // Moved up to reduce latency.
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+ __ mov(a2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ a2,
+ t3,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(a3, last_match_info_elements);
- __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ t3,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
+ ExternalReference::address_of_static_offsets_vector(isolate);
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
@@ -4895,8 +5096,24 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Code* code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
+ Label slow, non_function;
// The receiver might implicitly be the global object. This is
// indicated by passing the hole as the receiver to the call
@@ -4922,7 +5139,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &slow);
+ __ JumpIfSmi(a1, &non_function);
// Get the map of the function object.
__ GetObjectType(a1, a2, a2);
__ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -4950,8 +5167,22 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
+ // Check for function proxy.
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ push(a1); // Put proxy as additional argument.
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+ __ SetCallKind(t1, CALL_AS_FUNCTION);
+ {
+ Handle<Code> adaptor =
+ masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
+ __ bind(&non_function);
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
__ li(a0, Operand(argc_)); // Setup the number of arguments.
__ mov(a2, zero_reg);
@@ -5057,24 +5288,27 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ Branch(&call_runtime_, ne, result_, Operand(t0));
// Get the first of the two strings and load its instance type.
- __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ lw(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ addu(scratch_, scratch_, result_);
- __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ lw(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ lw(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
__ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+ // Actually fetch the parent string if it is confirmed to be sequential.
+ STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
+ __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -6463,39 +6697,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- Label fpu_eq, fpu_lt, fpu_gt;
- // Compare operands (test if unordered).
- __ c(UN, D, f0, f2);
- // Don't base result on status bits when a NaN is involved.
- __ bc1t(&unordered);
- __ nop();
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
- // Test if equal.
- __ c(EQ, D, f0, f2);
- __ bc1t(&fpu_eq);
- __ nop();
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
- // Test if unordered or less (unordered case is already handled).
- __ c(ULT, D, f0, f2);
- __ bc1t(&fpu_lt);
- __ nop();
+ // Otherwise it's greater, so just fall through and return.
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(GREATER)); // In delay slot.
- // Otherwise it's greater.
- __ bc1f(&fpu_gt);
- __ nop();
-
- // Return a result of -1, 0, or 1.
__ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
__ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
-
- __ bind(&fpu_gt);
- __ li(v0, Operand(GREATER));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ li(v0, Operand(LESS)); // In delay slot.
__ bind(&unordered);
}
@@ -6646,12 +6866,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
- __ EnterInternalFrame();
- __ Push(a1, a0);
- __ li(t0, Operand(Smi::FromInt(op_)));
- __ push(t0);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a0);
+ __ li(t0, Operand(Smi::FromInt(op_)));
+ __ push(t0);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6867,6 +7088,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// a1: key
@@ -6960,6 +7183,269 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { s2, s0, t3, EMIT_REMEMBERED_SET },
+ { s2, a2, t3, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { a3, t0, t1, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { t0, a1, a2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { a1, a2, a3, EMIT_REMEMBERED_SET },
+ { a3, a2, a1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { a2, a1, a3, EMIT_REMEMBERED_SET },
+ { a3, a1, a2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { t0, a2, a3, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer to
+// the value has just been written into the object; now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two branch+nop instructions are generated with labels so as to
+ // get the offset fixed up correctly by the bind(Label*) call. We patch it
+ // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+ // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+ // incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+ __ nop();
+ __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+ __ nop();
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(a0));
+ __ Move(address, regs_.address());
+ __ Move(a0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(a1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ lw(a1, MemOperand(address, 0));
+ }
+ __ li(a2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // register temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
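Putting the two helpers together, the incremental path only ever calls out to C when a black object ends up referencing something the collector could otherwise miss. A deliberately loose host-side model of that decision, not part of this patch (made-up types; it ignores the data-object shortcut EnsureNotWhite takes and the register shuffling above):

#include <cstdio>

// Loose model of CheckNeedsToInformIncrementalMarker's outcome.
enum Color { kWhite, kGrey, kBlack };

struct PageFlags { bool evacuation_candidate; bool skip_slots_recording; };
struct ObjectInfo { Color color; PageFlags page; };

bool NeedsToInformIncrementalMarker(const ObjectInfo& object,
                                    const ObjectInfo& value,
                                    bool compacting) {
  if (object.color != kBlack) return false;  // not black: the marker does not care yet
  if (compacting && value.page.evacuation_candidate &&
      !object.page.skip_slots_recording) {
    return true;                             // the slot itself must be recorded
  }
  return value.color == kWhite;              // white value reachable from a black object
}

int main() {
  ObjectInfo black_obj{kBlack, {false, false}};
  ObjectInfo white_val{kWhite, {false, false}};
  std::printf("inform=%d\n", NeedsToInformIncrementalMarker(black_obj, white_val, false));
  return 0;
}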
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index aa224bcfa..ef6b88908 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -59,6 +59,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -324,7 +343,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch),
- sign_(scratch2) { }
+ sign_(scratch2) {
+ ASSERT(IntRegisterBits::is_valid(the_int_.code()));
+ ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
+ ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
+ ASSERT(SignRegisterBits::is_valid(sign_.code()));
+ }
+
+ bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
private:
Register the_int_;
@@ -336,13 +363,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
+ class SignRegisterBits: public BitField<int, 12, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
+ | ScratchRegisterBits::encode(scratch_.code())
+ | SignRegisterBits::encode(sign_.code());
}
void Generate(MacroAssembler* masm);
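MinorKey() packs the four 4-bit register codes into a single 16-bit stub key via BitField, which is what makes each register combination a distinct, cacheable stub. A simplified stand-alone version of that packing (the template below is a cut-down stand-in for v8::internal::BitField, and the register codes in main() are just example MIPS register numbers):

#include <cassert>
#include <cstdio>

template <int kShift, int kSize>
struct BitField {
  static bool is_valid(unsigned value) { return (value >> kSize) == 0; }
  static unsigned encode(unsigned value) { return value << kShift; }
  static unsigned decode(unsigned field) {
    return (field >> kShift) & ((1u << kSize) - 1);
  }
};

typedef BitField<0, 4>  IntRegisterBits;
typedef BitField<4, 4>  HeapNumberRegisterBits;
typedef BitField<8, 4>  ScratchRegisterBits;
typedef BitField<12, 4> SignRegisterBits;

int main() {
  // Example codes: a1 = 5, v0 = 2, a2 = 6, a3 = 7.
  unsigned minor_key = IntRegisterBits::encode(5) |
                       HeapNumberRegisterBits::encode(2) |
                       ScratchRegisterBits::encode(6) |
                       SignRegisterBits::encode(7);
  assert(SignRegisterBits::is_valid(7));
  assert(HeapNumberRegisterBits::decode(minor_key) == 2);
  std::printf("MinorKey = 0x%04x\n", minor_key);  // 0x7625
  return 0;
}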
@@ -375,6 +404,215 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+ masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+ (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+ ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+ }
+
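PatchBranchIntoNop and PatchNopIntoBranch work because a MIPS I-type branch keeps its opcode in bits 31:26 (BEQ = 0x04, BNE = 0x05), rs in 25:21, rt in 20:16 and the signed 16-bit offset in 15:0; with rs = rt = zero_reg, beq always branches and bne never does, so flipping the opcode toggles the stub between its incremental path and a fall-through nop while preserving the encoded offset. A host-side sketch of the same bit manipulation (plain uint32_t words instead of the assembler's instruction buffer):

#include <cstdint>
#include <cstdio>

const uint32_t kBeqOpcode = 0x04u << 26;  // beq: always taken when rs == rt == $zero
const uint32_t kBneOpcode = 0x05u << 26;  // bne: never taken, i.e. a nop in this position
const int kRsShift = 21;
const int kRtShift = 16;
const uint32_t kImm16Mask = 0xffff;

uint32_t PatchBranchIntoNop(uint32_t instr) {
  return kBneOpcode | (0u << kRsShift) | (0u << kRtShift) | (instr & kImm16Mask);
}

uint32_t PatchNopIntoBranch(uint32_t instr) {
  return kBeqOpcode | (0u << kRsShift) | (0u << kRtShift) | (instr & kImm16Mask);
}

int main() {
  uint32_t taken = kBeqOpcode | 0x0007u;            // beq $zero, $zero, +7
  uint32_t nop_like = PatchBranchIntoNop(taken);    // bne $zero, $zero, +7
  std::printf("%08x -> %08x -> %08x\n",
              taken, nop_like, PatchNopIntoBranch(nop_like));
  return 0;
}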
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ 2 * Assembler::kInstrSize);
+
+ if (Assembler::IsBeq(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsBne(first_instruction));
+
+ if (Assembler::IsBeq(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsBne(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ masm->MultiPushFPU(kCallerSavedFPU);
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(FPU);
+ masm->MultiPopFPU(kCallerSavedFPU);
+ }
+ masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ Register GetRegThatIsNotOneOf(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ bool MustBeInStubCache() {
+ // All stubs must be registered in the stub cache
+ // otherwise IncrementalMarker would not be able to find
+ // and patch it.
+ return true;
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
+
+
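
GetMode and Patch above distinguish the three modes purely by the first two instructions: bne zero_reg, zero_reg is never taken (an effective nop), while beq zero_reg, zero_reg is always taken, which is how the stub is switched into the incremental modes. A small sketch of that encoding trick, using the standard MIPS I-type layout; the opcodes and shift amounts below are assumptions drawn from the architecture manual rather than V8's named constants (kRsShift, kRtShift).

#include <cstdint>
#include <cstdio>

// MIPS I-type encoding: opcode(6) | rs(5) | rt(5) | imm(16).
const uint32_t kBeqOpcodeSketch = 0x04u << 26;
const uint32_t kBneOpcodeSketch = 0x05u << 26;
const int kZeroRegCode = 0;

uint32_t EncodeBranch(uint32_t opcode, int rs, int rt, uint16_t imm) {
  return opcode | (static_cast<uint32_t>(rs) << 21) |
         (static_cast<uint32_t>(rt) << 16) | imm;
}

int main() {
  uint16_t offset = 0x0010;  // Hypothetical branch offset, preserved by patching.
  // bne zero_reg, zero_reg, off : never taken, i.e. an effective nop.
  uint32_t nop_like = EncodeBranch(kBneOpcodeSketch, kZeroRegCode, kZeroRegCode, offset);
  // beq zero_reg, zero_reg, off : always taken.
  uint32_t branch = EncodeBranch(kBeqOpcodeSketch, kZeroRegCode, kZeroRegCode, offset);
  // Patching flips only the opcode bits and keeps the 16-bit offset, which is
  // what PatchBranchIntoNop/PatchNopIntoBranch above rely on.
  std::printf("nop-like = 0x%08x, branch = 0x%08x\n",
              static_cast<unsigned>(nop_like), static_cast<unsigned>(branch));
  return 0;
}
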
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
@@ -578,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -590,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 4400b643a..ff146dd4e 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -38,12 +38,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
}
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index a8de9c861..b020d8057 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -71,21 +71,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- // This is in correlation with the padding in MacroAssembler::Abort.
- return FLAG_debug_code ? 45 : 20;
- }
-
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
-
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- // Magic number 5: instruction count after patched map load:
- // li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
- return Isolate::Current()->inlined_write_barrier_size() + 5;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index e323c505e..5b3ae89db 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -124,55 +124,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ And(at, reg, 0xc0000000);
- __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non object values
+ // are stored as a smi causing it to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ And(at, reg, 0xc0000000);
+ __ Assert(
+ eq, "Unable to encode value as smi", at, Operand(zero_reg));
+ }
+ __ sll(reg, reg, kSmiTagSize);
}
- __ sll(reg, reg, kSmiTagSize);
}
+ __ MultiPush(object_regs | non_object_regs);
}
- __ MultiPush(object_regs | non_object_regs);
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(a0, zero_reg); // No arguments.
- __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ MultiPop(object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ srl(reg, reg, kSmiTagSize);
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ li(reg, kDebugZapValue);
+ __ mov(a0, zero_reg); // No arguments.
+ __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ MultiPop(object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ srl(reg, reg, kSmiTagSize);
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ li(reg, kDebugZapValue);
+ }
}
}
- }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
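
The debug-break helper above keeps raw (non-object) register values GC-safe by shifting them into smi form before the runtime call and shifting them back afterwards. A rough model of that tagging, assuming the usual 32-bit layout with a one-bit smi tag:

#include <cassert>
#include <cstdint>

const int kSmiTagSizeSketch = 1;  // Assumed: low bit 0 marks a smi.

uint32_t TagAsSmi(uint32_t raw) {
  // Matches the debug-code assert above: the top two bits must be clear so
  // the value still fits in a non-negative smi after the shift.
  assert((raw & 0xc0000000u) == 0);
  return raw << kSmiTagSizeSketch;   // sll reg, reg, kSmiTagSize
}

uint32_t UntagSmi(uint32_t tagged) {
  return tagged >> kSmiTagSizeSketch;  // srl reg, reg, kSmiTagSize
}
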
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 18b623199..280b8cb54 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -53,7 +53,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 2c838938b..a2ebce682 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -85,6 +85,20 @@ static const RegList kCalleeSavedFPU =
1 << 30; // f30
static const int kNumCalleeSavedFPU = 6;
+
+static const RegList kCallerSavedFPU =
+ 1 << 0 | // f0
+ 1 << 2 | // f2
+ 1 << 4 | // f4
+ 1 << 6 | // f6
+ 1 << 8 | // f8
+ 1 << 10 | // f10
+ 1 << 12 | // f12
+ 1 << 14 | // f14
+ 1 << 16 | // f16
+ 1 << 18; // f18
+
+
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
static const int kNumSafepointRegisters = 24;
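
kCallerSavedFPU above is simply a bit mask over FPU register numbers (the even-numbered registers f0-f18, which hold doubles on MIPS32). The sketch below builds the same mask and walks it from the highest register down, which is also the iteration order the MultiPush/MultiPop loops later in this patch use.

#include <cstdint>
#include <cstdio>

typedef uint32_t RegListSketch;

// Even-numbered FPU registers f0..f18, as in kCallerSavedFPU above.
const RegListSketch kCallerSavedFPUSketch =
    (1u << 0) | (1u << 2) | (1u << 4) | (1u << 6) | (1u << 8) |
    (1u << 10) | (1u << 12) | (1u << 14) | (1u << 16) | (1u << 18);

int main() {
  // MultiPushFPU-style walk: highest register first, so the lowest-numbered
  // register ends up at the lowest stack address.
  const int kNumRegistersSketch = 32;  // Assumed register count.
  for (int i = kNumRegistersSketch - 1; i >= 0; i--) {
    if (kCallerSavedFPUSketch & (1u << i)) std::printf("push f%d\n", i);
  }
  return 0;
}
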
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index b042a3eca..b3f054087 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -47,6 +47,7 @@
#include "stub-cache.h"
#include "mips/code-stubs-mips.h"
+#include "mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@@ -62,9 +63,11 @@ static unsigned GetPropertyId(Property* property) {
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
-// bit immediate value is used) is the delta from the pc to the first
+// marker is an andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
+// (raw 16 bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +106,7 @@ class JumpPatchSite BASE_EMBEDDED {
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+ __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
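
EmitPatchInfo above folds the distance back to the patch site into a single andi marker: the register field carries delta / kImm16Mask and the 16-bit immediate carries delta % kImm16Mask, where kImm16Mask is 0xffff. A quick round-trip sketch of that encoding with a hypothetical delta value:

#include <cassert>
#include <cstdio>

const int kImm16MaskSketch = 0xffff;

void EncodeDelta(int delta, int* reg_code, int* imm16) {
  *reg_code = delta / kImm16MaskSketch;  // Becomes the rx field of the andi.
  *imm16 = delta % kImm16MaskSketch;     // Becomes the immediate.
}

int DecodeDelta(int reg_code, int imm16) {
  return reg_code * kImm16MaskSketch + imm16;
}

int main() {
  int reg_code, imm16;
  EncodeDelta(70000, &reg_code, &imm16);  // Hypothetical delta.
  assert(DecodeDelta(reg_code, imm16) == 70000);
  std::printf("rx = %d, imm = 0x%04x\n", reg_code, static_cast<unsigned>(imm16));
  return 0;
}
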
@@ -162,6 +165,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
@@ -207,14 +215,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ li(a1, Operand(Context::SlotOffset(var->index())));
- __ addu(a2, cp, a1);
- __ sw(a0, MemOperand(a2, 0));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(a2, cp);
- __ RecordWrite(a2, a1, a3);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ sw(a0, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
}
}
}
@@ -272,7 +278,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -310,17 +316,25 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+ // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+ // to make sure it is constant. Branch may emit a skip-or-jump sequence
+ // instead of the normal Branch. It seems that the "skip" part of that
+ // sequence is about as long as this Branch would be so it is safe to ignore
+ // that.
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(t0));
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
StackCheckStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
- __ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -670,10 +684,12 @@ void FullCodeGenerator::SetVar(Variable* var,
__ sw(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWrite(scratch0,
- Operand(Context::SlotOffset(var->index())),
- scratch1,
- src);
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs);
}
}
@@ -705,7 +721,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -723,7 +739,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ sw(result_register(), StackOperand(variable));
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ sw(t0, StackOperand(variable));
@@ -750,10 +766,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ sw(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ mov(a1, cp);
- __ RecordWrite(a1, Operand(offset), a2, result_register());
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ a2,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sw(at, ContextOperand(cp, variable->index()));
@@ -766,10 +788,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ li(a2, Operand(variable->name()));
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == VAR || mode == CONST || mode == LET);
+ PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
__ li(a1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -779,7 +799,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, a2, a1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
__ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
__ Push(cp, a2, a1, a0);
} else {
@@ -1201,17 +1221,25 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ Branch(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == LET) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ if (local->mode() == CONST) {
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
+ } else { // LET
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ Branch(done);
}
@@ -1244,14 +1272,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+ if (var->mode() != LET && var->mode() != CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == Variable::LET) {
+ if (var->mode() == LET) {
Label done;
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
@@ -1491,14 +1519,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ lw(a1, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+ __ lw(t6, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ sw(result_register(), FieldMemOperand(a1, offset));
+ Label no_map_change;
+ __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with v0 as the scratch
// register.
- __ RecordWrite(a1, Operand(offset), a2, result_register());
+ __ RecordWriteField(
+ a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ CheckFastSmiOnlyElements(a3, a2, &no_map_change);
+ __ push(t6); // Copy of array literal.
+ __ CallRuntime(Runtime::kNonSmiElementStored, 1);
+ __ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1850,7 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(v0); // Value.
@@ -1875,11 +1912,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(a3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(a1, Operand(offset), a2, a3);
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
+ } else if (var->mode() != CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
@@ -1893,7 +1931,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ sw(v0, location);
if (var->IsContextSlot()) {
__ mov(a3, v0);
- __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2121,10 +2161,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(a1);
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
+ StrictModeFlag strict_mode =
+ FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
__ li(a1, Operand(Smi::FromInt(strict_mode)));
__ push(a1);
@@ -2170,7 +2208,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2671,18 +2709,23 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
__ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
__ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
__ GetObjectType(v0, a1, a1);
__ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
@@ -2861,7 +2904,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+ __ mov(a2, v0);
+ __ RecordWriteField(
+ a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(v0);
@@ -3154,16 +3199,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ sw(scratch1, MemOperand(index2, 0));
__ sw(scratch2, MemOperand(index1, 0));
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ scratch1,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask).
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index1,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index2,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3921,10 +3981,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
}
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -3964,10 +4028,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, v0); // Leave map in a1.
- Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+ Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+ if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
@@ -3986,18 +4051,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
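
With NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2, the typeof "function" test above becomes two equality checks instead of a range comparison. A condensed model of the emitted logic; the enum names are placeholders, not the real instance-type constants.

// Placeholder instance types; the real values live in V8's InstanceType enum.
enum InstanceTypeSketch {
  JS_FUNCTION_TYPE_SKETCH,
  JS_FUNCTION_PROXY_TYPE_SKETCH,
  OTHER_SPEC_OBJECT_TYPE_SKETCH
};

bool TypeofIsFunction(bool value_is_smi, InstanceTypeSketch type) {
  if (value_is_smi) return false;                 // JumpIfSmi(v0, if_false)
  return type == JS_FUNCTION_TYPE_SKETCH ||       // Branch(if_true, eq, ...)
         type == JS_FUNCTION_PROXY_TYPE_SKETCH;   // Split(eq, ...)
}
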
@@ -4005,9 +4059,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4015,13 +4072,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@@ -4046,11 +4096,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = eq;
- bool strict = false;
switch (op) {
case Token::EQ_STRICT:
- strict = true;
- // Fall through.
case Token::EQ:
cc = eq;
__ mov(a0, result_register());
@@ -4109,8 +4156,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4118,15 +4166,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
+ VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
__ mov(a0, result_register());
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- if (expr->is_strict()) {
+ __ LoadRoot(a1, nil_value);
+ if (expr->op() == Token::EQ_STRICT) {
Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ Branch(if_true, eq, a0, Operand(a1));
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a1, other_nil_value);
__ Branch(if_true, eq, a0, Operand(a1));
__ And(at, a0, Operand(kSmiTagMask));
__ Branch(if_false, eq, at, Operand(zero_reg));
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index a76c215a4..fb33eb665 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -210,7 +210,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}
@@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ Push(a3, a2);
+ // Push the receiver and the name of the function.
+ __ Push(a3, a2);
- // Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ li(a0, Operand(2));
+ __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to a1 and leave the internal frame.
- __ mov(a1, v0);
- __ LeaveInternalFrame();
+ // Move result to a1 and leave the internal frame.
+ __ mov(a1, v0);
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -649,12 +651,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
- __ EnterInternalFrame();
- __ push(a2); // Save the key.
- __ Push(a1, a2); // Pass the receiver and the key.
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(a2); // Restore the key.
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(a2); // Save the key.
+ __ Push(a1, a2); // Pass the receiver and the key.
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(a2); // Restore the key.
+ }
__ mov(a1, v0);
__ jmp(&do_call);
@@ -902,9 +905,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
__ sw(a0, mapped_location);
- // Verify mapped_location MemOperand is register, with no offset.
- ASSERT_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5);
+ __ Addu(t2, a3, t1);
+ __ mov(t5, a0);
+ __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&notin);
@@ -912,8 +915,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
__ sw(a0, unmapped_location);
- ASSERT_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5);
+ __ Addu(t2, a3, t0);
+ __ mov(t5, a0);
+ __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -1201,109 +1205,144 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
-
- Label slow, fast, array, extra, exit;
+ Label slow, array, extra, check_if_double_array;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = a0;
Register key = a1;
Register receiver = a2;
Register elements = a3; // Elements array of the receiver.
- // t0 is used as ip in the arm version.
- // t3-t4 are used as temporaries.
+ Register elements_map = t2;
+ Register receiver_map = t3;
+ // t0 and t1 are used as general scratch registers.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
-
// Get the map of the object.
- __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+ __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
- __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
-
- __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+ __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
// Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
- __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
- __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
- __ Branch(&slow, ne, t3, Operand(t0));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast, lo, key, Operand(t0));
- // Fall thru to slow if un-tagged index >= length.
+ __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
// Slow case, handle jump to runtime.
__ bind(&slow);
-
// Entry registers are intact.
// a0: value.
// a1: key.
// a2: receiver.
-
GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
-
__ bind(&extra);
+ // Condition code from comparing key and array length is still available.
// Only support writing to array[array.length].
__ Branch(&slow, ne, key, Operand(t0));
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(&check_if_double_array, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
// Calculate key + 1 as smi.
- STATIC_ASSERT(0 == kSmiTag);
- __ Addu(t3, key, Operand(Smi::FromInt(1)));
- __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&fast);
-
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(t0, key, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ Branch(&slow, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ // Add 1 to key, and go to common element store code for doubles.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(t0, key, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
-
__ bind(&array);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
- __ Branch(&slow, ne, t3, Operand(t0));
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
// Fall through to fast case.
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t4, t4, Operand(t1));
- __ sw(value, MemOperand(t4));
- // Skip write barrier if the written value is a smi.
- __ JumpIfSmi(value, &exit);
-
+ __ bind(&fast_object_with_map_check);
+ Register scratch_value = t0;
+ Register address = t1;
+ __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ Branch(&fast_double_with_map_check, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, value);
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Fast elements array, store the value to the elements backing store.
+ __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch_value);
+ __ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ Subu(t3, t4, Operand(elements));
-
- __ RecordWrite(elements, Operand(t3), t4, t5);
- __ bind(&exit);
-
- __ mov(v0, a0); // Return the value written.
+ __ mov(v0, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ value,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Ret();
+
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ Branch(&slow, ne, elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements,
+ t0,
+ t1,
+ t2,
+ t3,
+ &slow);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, value);
}
@@ -1572,7 +1611,8 @@ void PatchInlinedSmiCode(Address address) {
// If the instruction following the call is not a andi at, rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
- if (!Assembler::IsAndImmediate(instr)) {
+ if (!(Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
return;
}
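
The rewritten generic keyed store above splits the fast path by elements kind: smi stores never need a write barrier, non-smi stores into smi-only arrays bail out to the runtime, fast object stores go through RecordWrite, and fast double arrays use StoreNumberToDoubleElements. A compact model of that dispatch; the enums are descriptive stand-ins, since the real code keys off the elements map and CheckFastObjectElements rather than an explicit kind value.

enum ElementsKindSketch { FAST_SMI_ONLY_SKETCH, FAST_OBJECT_SKETCH,
                          FAST_DOUBLE_SKETCH, OTHER_SKETCH };
enum StoreOutcomeSketch { STORE_NO_BARRIER, STORE_WITH_BARRIER,
                          STORE_AS_DOUBLE, GO_SLOW };

StoreOutcomeSketch ClassifyKeyedStore(ElementsKindSketch kind,
                                      bool value_is_smi) {
  if (kind == OTHER_SKETCH) return GO_SLOW;
  if (kind == FAST_DOUBLE_SKETCH) return STORE_AS_DOUBLE;  // StoreNumberToDoubleElements
  if (value_is_smi) return STORE_NO_BARRIER;          // Smi stores skip the barrier.
  if (kind == FAST_SMI_ONLY_SKETCH) return GO_SLOW;   // CheckFastObjectElements fails.
  return STORE_WITH_BARRIER;                          // sw + RecordWrite.
}
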
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 4c48ef183..2964fbc86 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -80,46 +81,15 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address: Clear bits from 0 to kPageSizeBits.
- if (mips32r2) {
- Ins(object, zero_reg, 0, kPageSizeBits);
- } else {
- // The Ins macro is slow on r1, so use shifts instead.
- srl(object, object, kPageSizeBits);
- sll(object, object, kPageSizeBits);
- }
-
- // Calculate region number.
- Ext(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- li(at, Operand(1));
- sllv(at, at, address);
- or_(scratch, scratch, at);
- sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
- Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ if (num_unsaved > 0) {
+ Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
MultiPush(kSafepointSavedRegisters);
}
@@ -127,7 +97,9 @@ void MacroAssembler::PushSafepointRegisters() {
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
- Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+ if (num_unsaved > 0) {
+ Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+ }
}
@@ -180,6 +152,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -187,8 +160,6 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
@@ -200,38 +171,53 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Will clobber 4 registers: object, scratch0, scratch1, at. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!AreAliased(value, dst, t8, object));
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
- // Add offset into the object.
- Addu(scratch0, object, offset);
+ // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
+ Addu(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+ li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@@ -241,29 +227,97 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register scratch) {
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ ASSERT(!AreAliased(object, address, value, t8));
+ ASSERT(!AreAliased(object, address, value, t9));
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+ ASSERT(!address.is(cp) && !value.is(cp));
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ And(t8, value, Operand(kSmiTagMask));
+ Branch(&done, eq, t8, Operand(zero_reg));
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
// Record the actual write.
- RecordWriteHelper(object, address, scratch);
+ if (ra_status == kRAHasNotBeenSaved) {
+ push(ra);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (ra_status == kRAHasNotBeenSaved) {
+ pop(ra);
+ }
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- li(object, Operand(BitCast<int32_t>(kZapValue)));
- li(address, Operand(BitCast<int32_t>(kZapValue)));
- li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+ li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
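
The two CheckPageFlag tests above let RecordWrite bail out early: the barrier only matters when the value's page is marked as containing interesting pointer targets and the object's page as containing interesting pointer sources. A simplified model of that filter; the page size, header layout and flag bits are assumptions, not the real MemoryChunk definitions.

#include <cstdint>

const int kPageSizeBitsSketch = 20;  // Assumed page size.
const uintptr_t kPageMaskSketch =
    ~((static_cast<uintptr_t>(1) << kPageSizeBitsSketch) - 1);
const uint32_t kPointersToHereAreInterestingSketch = 1u << 1;
const uint32_t kPointersFromHereAreInterestingSketch = 1u << 2;

struct PageHeaderSketch { uint32_t flags; };  // Imagined header at page start.

static PageHeaderSketch* PageOf(const void* address) {
  return reinterpret_cast<PageHeaderSketch*>(
      reinterpret_cast<uintptr_t>(address) & kPageMaskSketch);
}

bool NeedsWriteBarrier(const void* object, const void* value) {
  // CheckPageFlag(value, ..., kPointersToHereAreInterestingMask, eq, &done);
  if ((PageOf(value)->flags & kPointersToHereAreInterestingSketch) == 0)
    return false;
  // CheckPageFlag(object, ..., kPointersFromHereAreInterestingMask, eq, &done);
  if ((PageOf(object)->flags & kPointersFromHereAreInterestingSketch) == 0)
    return false;
  return true;  // Fall through to the RecordWriteStub call.
}
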
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ li(t8, Operand(store_buffer));
+ lw(scratch, MemOperand(t8));
+ // Store pointer to buffer and increment buffer top.
+ sw(address, MemOperand(scratch));
+ Addu(scratch, scratch, kPointerSize);
+ // Write back new top of buffer.
+ sw(scratch, MemOperand(t8));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kFallThroughAtEnd) {
+ Branch(&done, eq, t8, Operand(zero_reg));
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq, t8, Operand(zero_reg));
+ }
+ push(ra);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(ra);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
}
}
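
RememberedSetHelper above is a bump-pointer append into the store buffer: write the slot address at the current top, advance the top, and test a single bit of the new top to decide whether the buffer needs draining. A rough model of that sequence; the overflow bit and buffer alignment are assumptions about the layout, not exact V8 constants.

#include <cstdint>

// Assumed layout: the buffer is aligned so that advancing past its end flips
// this bit in the top pointer, which is what the And/Branch on
// StoreBuffer::kStoreBufferOverflowBit checks.
const uintptr_t kStoreBufferOverflowBitSketch = static_cast<uintptr_t>(1) << 16;

struct StoreBufferSketch {
  uintptr_t* top;

  // Returns true when the slow path (StoreBufferOverflowStub) must run.
  bool Record(uintptr_t slot_address) {
    *top = slot_address;  // sw(address, MemOperand(scratch))
    ++top;                // Addu(scratch, scratch, kPointerSize)
    return (reinterpret_cast<uintptr_t>(top) &
            kStoreBufferOverflowBitSketch) != 0;
  }
};
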
@@ -707,7 +761,7 @@ void MacroAssembler::MultiPush(RegList regs) {
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -746,7 +800,7 @@ void MacroAssembler::MultiPop(RegList regs) {
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
@@ -762,7 +816,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -804,7 +858,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
@@ -814,6 +868,21 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
}
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+ RegList saved_regs = kJSCallerSaved | ra.bit();
+ MultiPush(saved_regs);
+ AllowExternalCallThatCantCauseGC scope(this);
+
+ // Save to a0 in case address == t0.
+ Move(a0, address);
+ PrepareCallCFunction(2, t0);
+
+ li(a1, instructions * kInstrSize);
+ CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+ MultiPop(saved_regs);
+}
+
+
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
@@ -940,11 +1009,9 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Test if scratch > fd.
- c(OLT, D, fd, scratch);
-
- Label simple_convert;
// If fd < 2^31 we can convert it normally.
- bc1t(&simple_convert);
+ Label simple_convert;
+ BranchF(&simple_convert, NULL, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -964,6 +1031,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd) {
+ if (cc == al) {
+ Branch(bd, target);
+ return;
+ }
+
+ ASSERT(nan || target);
+ // Check for unordered (NaN) cases.
+ if (nan) {
+ c(UN, D, cmp1, cmp2);
+ bc1t(nan);
+ }
+
+ if (target) {
+ // Here NaN cases were either handled by this function or are assumed to
+ // have been handled by the caller.
+ // Unsigned conditions are treated as their signed counterpart.
+ switch (cc) {
+ case Uless:
+ case less:
+ c(OLT, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case Ugreater:
+ case greater:
+ c(ULE, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case Ugreater_equal:
+ case greater_equal:
+ c(ULT, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ case Uless_equal:
+ case less_equal:
+ c(OLE, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case eq:
+ c(EQ, D, cmp1, cmp2);
+ bc1t(target);
+ break;
+ case ne:
+ c(EQ, D, cmp1, cmp2);
+ bc1f(target);
+ break;
+ default:
+ CHECK(0);
+ };
+ }
+
+ if (bd == PROTECT) {
+ nop();
+ }
+}
+
+
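
BranchF above dispatches NaN operands first and then picks one FPU compare plus a bc1t/bc1f so that each C-level relation comes out right for ordered inputs; unsigned condition codes reuse the signed mapping. A host-side model of the taken/not-taken behaviour once the NaN case has been routed to the nan label:

#include <cmath>

enum CondSketch { kLess, kGreater, kLessEqual, kGreaterEqual, kEq, kNe };

// Returns true when the branch to 'target' would be taken, assuming NaN
// operands were already dispatched by the c(UN) / bc1t(nan) pair above.
bool FloatBranchTaken(CondSketch cc, double a, double b) {
  if (std::isnan(a) || std::isnan(b)) return false;  // Handled by bc1t(nan).
  switch (cc) {
    case kLess:         return a < b;      // c(OLT) + bc1t
    case kGreater:      return !(a <= b);  // c(ULE) + bc1f
    case kLessEqual:    return a <= b;     // c(OLE) + bc1t
    case kGreaterEqual: return !(a < b);   // c(ULT) + bc1f
    case kEq:           return a == b;     // c(EQ)  + bc1t
    case kNe:           return a != b;     // c(EQ)  + bc1f
  }
  return false;
}
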
+void MacroAssembler::Move(FPURegister dst, double imm) {
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value(imm);
+ // Handle special values first.
+ bool force_load = dst.is(kDoubleRegZero);
+ if (value.bits == zero.bits && !force_load) {
+ mov_d(dst, kDoubleRegZero);
+ } else if (value.bits == minus_zero.bits && !force_load) {
+ neg_d(dst, kDoubleRegZero);
+ } else {
+ uint32_t lo, hi;
+ DoubleAsTwoUInt32(imm, &lo, &hi);
+ // Move the low part of the double into the lower of the corresponding FPU
+ // register of FPU register pair.
+ if (lo != 0) {
+ li(at, Operand(lo));
+ mtc1(at, dst);
+ } else {
+ mtc1(zero_reg, dst);
+ }
+    // Move the high part of the double into the upper half of the
+    // corresponding FPU register pair.
+ if (hi != 0) {
+ li(at, Operand(hi));
+ mtc1(at, dst.high());
+ } else {
+ mtc1(zero_reg, dst.high());
+ }
+ }
+}
+
+
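Move(FPURegister, double) special-cases the 0.0 and -0.0 bit patterns and otherwise splits the immediate into the two 32-bit words that are loaded into the register pair via DoubleAsTwoUInt32. A stand-alone sketch of that decomposition (the helper name and the printed checks are illustrative, not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Split a double into the two 32-bit words an FPU register pair holds.
    static void DoubleAsTwoWords(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoWords(-0.0, &lo, &hi);
      std::printf("-0.0 -> hi=0x%08x lo=0x%08x\n", hi, lo);  // hi=0x80000000, lo=0.
      DoubleAsTwoWords(1.0, &lo, &hi);
      std::printf(" 1.0 -> hi=0x%08x lo=0x%08x\n", hi, lo);  // hi=0x3ff00000, lo=0.
      return 0;
    }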
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
// 32bits signed integer range.
@@ -1062,6 +1225,53 @@ void MacroAssembler::ConvertToInt32(Register source,
}
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+ FPURegister result,
+ DoubleRegister double_input,
+ Register scratch1,
+ Register except_flag,
+ CheckForInexactConversion check_inexact) {
+ ASSERT(CpuFeatures::IsSupported(FPU));
+ CpuFeatures::Scope scope(FPU);
+
+ int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
+
+ if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+ except_mask &= ~kFCSRInexactFlagMask;
+ }
+
+ // Save FCSR.
+ cfc1(scratch1, FCSR);
+ // Disable FPU exceptions.
+ ctc1(zero_reg, FCSR);
+
+ // Do operation based on rounding mode.
+ switch (rounding_mode) {
+ case kRoundToNearest:
+ round_w_d(result, double_input);
+ break;
+ case kRoundToZero:
+ trunc_w_d(result, double_input);
+ break;
+ case kRoundToPlusInf:
+ ceil_w_d(result, double_input);
+ break;
+ case kRoundToMinusInf:
+ floor_w_d(result, double_input);
+ break;
+ } // End of switch-statement.
+
+ // Retrieve FCSR.
+ cfc1(except_flag, FCSR);
+ // Restore FCSR.
+ ctc1(scratch1, FCSR);
+
+ // Check for fpu exceptions.
+ And(except_flag, except_flag, Operand(except_mask));
+}
+
+
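EmitFPUTruncate saves and clears the FCSR, applies one of the four FPU rounding instructions, and returns the raised exception bits masked by except_mask. A rough host-side analogue of that contract using the standard C++ rounding functions instead of the FCSR; this is a sketch only and ignores the out-of-range cases that the real flag check catches:

    #include <cmath>
    #include <cstdint>

    enum RoundingMode { kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf };

    // Round `input` with the given mode and report whether the conversion was
    // inexact, mirroring the except_flag idea.
    static int32_t FPUTruncate(RoundingMode mode, double input, bool* inexact) {
      double rounded = input;
      switch (mode) {
        case kRoundToNearest:  rounded = std::nearbyint(input); break;
        case kRoundToZero:     rounded = std::trunc(input);     break;
        case kRoundToPlusInf:  rounded = std::ceil(input);      break;
        case kRoundToMinusInf: rounded = std::floor(input);     break;
      }
      *inexact = (rounded != input);
      return static_cast<int32_t>(rounded);
    }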
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
@@ -1148,22 +1358,21 @@ void MacroAssembler::EmitECMATruncate(Register result,
FPURegister double_input,
FPURegister single_scratch,
Register scratch,
- Register input_high,
- Register input_low) {
+ Register scratch2,
+ Register scratch3) {
CpuFeatures::Scope scope(FPU);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch2.is(result));
+ ASSERT(!scratch3.is(result));
+ ASSERT(!scratch3.is(scratch2));
ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
+ !scratch.is(scratch2) &&
+ !scratch.is(scratch3));
ASSERT(!single_scratch.is(double_input));
Label done;
Label manual;
// Clear cumulative exception flags and save the FCSR.
- Register scratch2 = input_high;
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
@@ -1180,6 +1389,8 @@ void MacroAssembler::EmitECMATruncate(Register result,
Branch(&done, eq, scratch, Operand(zero_reg));
// Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
Move(input_low, input_high, double_input);
EmitOutOfInt32RangeTruncate(result,
input_high,
@@ -1211,15 +1422,6 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-bool MacroAssembler::UseAbsoluteCodePointers() {
- if (is_trampoline_emitted()) {
- return true;
- } else {
- return false;
- }
-}
-
-
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
BranchShort(offset, bdslot);
}
@@ -1233,11 +1435,18 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Jr(L, bdslot);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, bdslot);
+ } else {
+ Jr(L, bdslot);
+ }
} else {
- BranchShort(L, bdslot);
+ if (is_trampoline_emitted()) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
}
}
@@ -1245,15 +1454,26 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jr(L, bdslot);
- bind(&skip);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchShort(L, cond, rs, rt, bdslot);
+ } else {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ }
} else {
- BranchShort(L, cond, rs, rt, bdslot);
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
}
}
@@ -1276,8 +1496,8 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
Register scratch = at;
if (rt.is_reg()) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+ // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
+ // rt.
r2 = rt.rm_;
switch (cond) {
case cc_always:
@@ -1779,11 +1999,18 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Jalr(L, bdslot);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, bdslot);
+ } else {
+ Jalr(L, bdslot);
+ }
} else {
- BranchAndLinkShort(L, bdslot);
+ if (is_trampoline_emitted()) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
}
}
@@ -1791,15 +2018,26 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
- bool is_label_near = is_near(L);
- if (UseAbsoluteCodePointers() && !is_label_near) {
- Label skip;
- Condition neg_cond = NegateCondition(cond);
- BranchShort(&skip, neg_cond, rs, rt);
- Jalr(L, bdslot);
- bind(&skip);
+ if (L->is_bound()) {
+ if (is_near(L)) {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ } else {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ }
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ if (is_trampoline_emitted()) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ }
}
}
@@ -2306,10 +2544,10 @@ void MacroAssembler::Push(Handle<Object> handle) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
mov(a0, zero_reg);
li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
@@ -2972,15 +3210,140 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ Branch(&entry);
+ bind(&loop);
+ sw(filler, MemOperand(start_offset));
+ Addu(start_offset, start_offset, kPointerSize);
+ bind(&entry);
+ Branch(&loop, lt, start_offset, Operand(end_offset));
+}
+
+
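InitializeFieldsWithFiller stores |filler| into every word of [start_offset, end_offset) and leaves start_offset equal to end_offset. The same loop in plain C++, assuming 32-bit tagged words:

    #include <cstdint>

    // Fill [start, end) with a filler word; `start` is advanced to `end`,
    // matching the register behaviour of InitializeFieldsWithFiller.
    static void FillFields(uint32_t*& start, uint32_t* end, uint32_t filler) {
      while (start < end) {
        *start++ = filler;
      }
    }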
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, ls, scratch,
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch,
+ Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+ CheckMap(value_reg,
+ scratch1,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+ // in the exponent.
+ li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
+ sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ Addu(scratch1, scratch1, elements_reg);
+ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ sw(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
+ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ jmp(&have_double_value);
+
+ bind(&smi_value);
+ Addu(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ Addu(scratch1, scratch1, scratch2);
+  // scratch1 now holds the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(this,
+ untagged_value,
+ destination,
+ f0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ f2);
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ sdc1(f0, MemOperand(scratch1, 0));
+ } else {
+ sw(mantissa_reg, MemOperand(scratch1, 0));
+ sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+ }
+ bind(&done);
+}
+
+
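StoreNumberToDoubleElements classifies the heap number by its upper word: anything at or above kNaNOrInfinityLowerBoundUpper32 (signed compare) is NaN or +Infinity, and a non-zero mantissa word distinguishes the two; NaNs are replaced by the canonical NaN before the store. A stand-alone sketch of that classification, assuming the conventional IEEE-754 bound of 0x7FF00000 for the constant (it is not defined in this patch):

    #include <cstdint>
    #include <cstring>

    enum DoubleKind { kOrdinary, kPlusInfinity, kNaN };

    // Classify a double the way the exponent/mantissa word test does.
    // Values below the bound (including all negative values) take the
    // ordinary store path.
    static DoubleKind Classify(double d) {
      const int32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;  // Assumed.
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int32_t hi = static_cast<int32_t>(bits >> 32);
      uint32_t lo = static_cast<uint32_t>(bits);
      if (hi < kNaNOrInfinityLowerBoundUpper32) return kOrdinary;
      if (hi > kNaNOrInfinityLowerBoundUpper32) return kNaN;
      return (lo == 0) ? kPlusInfinity : kNaN;
    }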
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -3171,13 +3534,18 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
SetCallKind(t1, call_kind);
Call(code);
+ call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
SetCallKind(t1, call_kind);
@@ -3195,6 +3563,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -3217,6 +3588,9 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -3239,6 +3613,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -3249,7 +3626,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
if (V8::UseCrankshaft()) {
- UNIMPLEMENTED_MIPS();
+ // TODO(kasperl): For now, we always call indirectly through the
+ // code field in the function to allow recompilation to take effect
+ // without changing any of the call sites.
+ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
} else {
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
@@ -3349,14 +3730,14 @@ void MacroAssembler::GetObjectType(Register object,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3368,7 +3749,7 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -3377,7 +3758,6 @@ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
Condition cond,
Register r1,
const Operand& r2) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3486,6 +3866,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addiu(sp, sp, num_arguments * kPointerSize);
@@ -3566,7 +3952,16 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
- ASSERT(!left.is(right));
+
+ if (left.is(right) && dst.is(left)) {
+ ASSERT(!dst.is(t9));
+ ASSERT(!scratch.is(t9));
+ ASSERT(!left.is(t9));
+ ASSERT(!right.is(t9));
+ ASSERT(!overflow_dst.is(t9));
+ mov(t9, right);
+ right = t9;
+ }
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
@@ -3599,10 +3994,17 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
ASSERT(!overflow_dst.is(scratch));
ASSERT(!overflow_dst.is(left));
ASSERT(!overflow_dst.is(right));
- ASSERT(!left.is(right));
ASSERT(!scratch.is(left));
ASSERT(!scratch.is(right));
+ // This happens with some crankshaft code. Since Subu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ mov(overflow_dst, zero_reg);
+ return;
+ }
+
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
subu(dst, left, right); // Left is overwritten.
@@ -3651,8 +4053,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
li(a0, Operand(function->nargs));
li(a1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -3722,6 +4123,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
@@ -3854,14 +4258,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
li(a0, Operand(p0));
push(a0);
li(a0, Operand(Smi::FromInt(p1 - p0)));
push(a0);
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// Will not return here.
if (is_trampoline_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -4245,7 +4655,23 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
static const int kRegisterPassedArguments = 4;
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // Up to four simple arguments are passed in registers a0..a3.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ stack_passed_words += kCArgSlotCount;
+ return stack_passed_words;
+}
+
+
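Each double argument counts as two register-sized words, anything beyond the four register-passed arguments spills to the stack, and kCArgSlotCount words are always reserved for the O32 argument slots. A host-side replica for a quick sanity check; kCArgSlotCount == 4 is an assumption about the O32 configuration, not a value taken from this patch:

    // Mirrors MacroAssembler::CalculateStackPassedWords.
    static int CalculateStackPassedWords(int num_reg_arguments,
                                         int num_double_arguments) {
      const int kRegisterPassedArguments = 4;
      const int kCArgSlotCount = 4;  // Assumed O32 value.
      int stack_passed_words = 0;
      num_reg_arguments += 2 * num_double_arguments;
      if (num_reg_arguments > kRegisterPassedArguments) {
        stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
      }
      return stack_passed_words + kCArgSlotCount;
    }
    // CalculateStackPassedWords(2, 0) == 4: only the four argument slots.
    // CalculateStackPassedWords(3, 2) == 7: 3 + 2*2 = 7 words, 3 spill + 4 slots.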
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
int frame_alignment = ActivationFrameAlignment();
// Up to four simple arguments are passed in registers a0..a3.
@@ -4253,9 +4679,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- kCArgSlotCount;
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -4270,26 +4695,43 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
}
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ li(t8, Operand(function));
+ CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- CallCFunctionHelper(no_reg, function, t8, num_arguments);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
int num_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_arguments);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments) {
+ int num_reg_arguments,
+ int num_double_arguments) {
+ ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -4317,19 +4759,15 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (function.is(no_reg)) {
- function = t9;
- li(function, Operand(function_reference));
- } else if (!function.is(t9)) {
+ if (!function.is(t9)) {
mov(t9, function);
function = t9;
}
Call(function);
- int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
- 0 : num_arguments - kRegisterPassedArguments) +
- kCArgSlotCount;
+ int stack_passed_arguments = CalculateStackPassedWords(
+ num_reg_arguments, num_double_arguments);
if (OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4342,6 +4780,235 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value) {
+ lw(scratch, MemOperand(li_location));
+ // At this point scratch is a lui(at, ...) instruction.
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, "The instruction to patch should be a lui.",
+ scratch, Operand(LUI));
+ lw(scratch, MemOperand(li_location));
+ }
+ srl(t9, new_value, kImm16Bits);
+ Ins(scratch, t9, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location));
+
+ lw(scratch, MemOperand(li_location, kInstrSize));
+ // scratch is now ori(at, ...).
+ if (emit_debug_code()) {
+ And(scratch, scratch, kOpcodeMask);
+ Check(eq, "The instruction to patch should be an ori.",
+ scratch, Operand(ORI));
+ lw(scratch, MemOperand(li_location, kInstrSize));
+ }
+ Ins(scratch, new_value, 0, kImm16Bits);
+ sw(scratch, MemOperand(li_location, kInstrSize));
+
+ // Update the I-cache so the new lui and ori can be executed.
+ FlushICache(li_location, 2);
+}
+
+
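PatchRelocatedValue splices the upper half of new_value into the lui immediate and the lower half into the following ori, then flushes both instructions from the I-cache. A host-side sketch of the immediate splice; the 16-bit low immediate field of MIPS I-type instructions is assumed rather than taken from this patch:

    #include <cstdint>

    // Replace the low 16-bit immediate field of a MIPS I-type instruction.
    static uint32_t PatchImm16(uint32_t instr, uint32_t imm) {
      return (instr & 0xFFFF0000u) | (imm & 0xFFFFu);
    }

    // Patch a lui/ori pair in place so that it materializes new_value.
    static void PatchLuiOriPair(uint32_t* pair, uint32_t new_value) {
      pair[0] = PatchImm16(pair[0], new_value >> 16);      // lui immediate.
      pair[1] = PatchImm16(pair[1], new_value & 0xFFFFu);  // ori immediate.
      // The generated code then calls FlushICache(li_location, 2).
    }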
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
+ And(scratch, object, Operand(~Page::kPageAlignmentMask));
+ lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+ // Shift left 1 by adding.
+ Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
+ Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
+ And(t8, t9, Operand(mask_scratch));
+ Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+ jmp(&other_color);
+
+ bind(&word_boundary);
+ lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ And(t9, t9, Operand(1));
+ Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ ASSERT(!AreAliased(value, scratch, t8, no_reg));
+ Label is_data_object;
+ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ Branch(&is_data_object, eq, t8, Operand(scratch));
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string then
+  // it's an object containing no GC pointers.
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(not_data_object, ne, t8, Operand(zero_reg));
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ sll(t8, t8, kPointerSizeLog2);
+ Addu(bitmap_reg, bitmap_reg, t8);
+ li(t8, Operand(1));
+ sllv(mask_reg, t8, mask_reg);
+}
+
+
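GetMarkBits derives a bitmap cell index and a one-bit mask from an object address: the page base comes from masking off the page-offset bits, the bit index from address bits [kPointerSizeLog2, kPointerSizeLog2 + kBitsPerCellLog2), and the cell index from the page-offset bits above those. A sketch with assumed 32-bit constants (kPointerSizeLog2 = 2, Bitmap::kBitsPerCellLog2 = 5, kPageSizeBits = 20; none of them are defined in this patch):

    #include <cstdint>

    // Compute (cell_index, mask) for a marking bitmap with one bit per pointer.
    static void GetMarkBits(uintptr_t addr, uintptr_t* cell_index, uint32_t* mask) {
      const int kPointerSizeLog2 = 2;  // Assumed: 4-byte pointers.
      const int kBitsPerCellLog2 = 5;  // Assumed: 32 bits per bitmap cell.
      const int kPageSizeBits = 20;    // Assumed: 1 MB pages.
      const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      *cell_index = (addr & ((1u << kPageSizeBits) - 1)) >> kLowBits;
      *mask = 1u << bit;
    }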
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ And(t8, mask_scratch, load_scratch);
+ Branch(&done, ne, t8, Operand(zero_reg));
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ // sll may overflow, making the check conservative.
+ sll(t8, mask_scratch, 1);
+ And(t8, load_scratch, t8);
+ Branch(&ok, eq, t8, Operand(zero_reg));
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+ // Check for heap-number
+ lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ {
+ Label skip;
+ Branch(&skip, ne, t8, Operand(map));
+ li(length, HeapNumber::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not an indirect (cons or sliced) string then
+  // it's an object containing no GC pointers.
+ Register instance_type = load_scratch;
+ lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ And(t8, instance_type, Operand(kExternalStringTag));
+ {
+ Label skip;
+ Branch(&skip, eq, t8, Operand(zero_reg));
+ li(length, ExternalString::kSize);
+ Branch(&is_data_object);
+ bind(&skip);
+ }
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ lw(t9, FieldMemOperand(value, String::kLengthOffset));
+ And(t8, instance_type, Operand(kStringEncodingMask));
+ {
+ Label skip;
+ Branch(&skip, eq, t8, Operand(zero_reg));
+ srl(t9, t9, 1);
+ bind(&skip);
+ }
+ Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ And(length, length, Operand(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Or(t8, t8, Operand(mask_scratch));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ Addu(t8, t8, Operand(length));
+ sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
+
+
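For a white data object, EnsureNotWhite also computes the object size so the page's live-byte counter can be bumped: sequential ASCII strings shift the smi tag away (one byte per character), two-byte strings keep the tag in place (which equals length * 2), and the total is rounded up to the object alignment after adding the header. The same arithmetic as a sketch, with assumed 32-bit constants (SeqString::kHeaderSize = 12, kObjectAlignmentMask = 7):

    #include <cstdint>

    // Size in bytes of a sequential string given its smi-tagged length.
    static uint32_t SeqStringSize(uint32_t smi_length, bool is_ascii) {
      const uint32_t kSeqStringHeaderSize = 12;  // Assumed 32-bit value.
      const uint32_t kObjectAlignmentMask = 7;   // Assumed: 8-byte alignment.
      // ASCII: drop the one-bit smi tag. Two-byte: keep it, which is length * 2.
      uint32_t payload = is_ascii ? (smi_length >> 1) : smi_length;
      return (payload + kSeqStringHeaderSize + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }
    // Example: a 5-character ASCII string (smi_length == 10) -> (5 + 12 + 7) & ~7 == 24.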
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
lw(descriptors,
@@ -4353,6 +5020,60 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+ ASSERT(!output_reg.is(input_reg));
+ Label done;
+ li(output_reg, Operand(255));
+ // Normal branch: nop in delay slot.
+ Branch(&done, gt, input_reg, Operand(output_reg));
+ // Use delay slot in this branch.
+ Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+ mov(output_reg, zero_reg); // In delay slot.
+ mov(output_reg, input_reg); // Value is in range 0..255.
+ bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg) {
+ Label above_zero;
+ Label done;
+ Label in_bounds;
+
+ Move(temp_double_reg, 0.0);
+ BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than or equal to zero, or NaN: return 0.
+ mov(result_reg, zero_reg);
+ Branch(&done);
+
+ // Double value is >= 255, return 255.
+ bind(&above_zero);
+ Move(temp_double_reg, 255.0);
+ BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+ li(result_reg, Operand(255));
+ Branch(&done);
+
+ // In 0-255 range, round and truncate.
+ bind(&in_bounds);
+ round_w_d(temp_double_reg, input_reg);
+ mfc1(result_reg, temp_double_reg);
+ bind(&done);
+}
+
+
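ClampUint8 and ClampDoubleToUint8 implement the usual byte clamp: non-positive and NaN inputs map to 0, values above 255 map to 255, and in-range doubles are rounded to nearest by round_w_d. A hedged C++ equivalent of the same behaviour:

    #include <cmath>
    #include <cstdint>

    static uint8_t ClampUint8(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }

    static uint8_t ClampDoubleToUint8(double d) {
      if (!(d > 0.0)) return 0;   // Catches zero, negatives and NaN.
      if (d > 255.0) return 255;
      return static_cast<uint8_t>(std::lrint(d));  // Round to nearest, as round_w_d does.
    }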
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 5dd012e93..6f81a4bd6 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -50,15 +50,16 @@ class JumpTarget;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
-// Registers aliases
+
+// Register aliases.
// cp is assumed to be a callee saved register.
+const Register lithiumScratchReg = s3; // Scratch register.
+const Register lithiumScratchReg2 = s4; // Scratch register.
+const Register condReg = s5; // Simulated (partial) condition code for mips.
const Register roots = s6; // Roots array pointer.
const Register cp = s7; // JavaScript context pointer.
const Register fp = s8_fp; // Alias for fp.
-// Registers used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
+const DoubleRegister lithiumScratchDouble = f30; // Double scratch register.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -90,6 +91,43 @@ enum BranchDelaySlot {
PROTECT
};
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
+
+
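Stack-passed C arguments start just past the four O32 argument slots, so argument 5 sits at sp + kCArgsSlotsSize and every later argument is one pointer further up. A quick check of the offset formula with assumed 32-bit values (kPointerSize = 4, kCArgsSlotsSize = 16):

    // Byte offset of C argument `index` (index >= 5) relative to sp.
    static int CFunctionArgumentOffset(int index) {
      const int kPointerSize = 4;      // Assumed.
      const int kCArgsSlotsSize = 16;  // Assumed: four 4-byte argument slots.
      return (index - 5) * kPointerSize + kCArgsSlotsSize;
    }
    // CFunctionArgumentOffset(5) == 16, CFunctionArgumentOffset(6) == 20.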
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -138,21 +176,22 @@ class MacroAssembler: public Assembler {
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Register target, COND_ARGS);
+ static int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
- int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- unsigned ast_id = kNoASTId,
- COND_ARGS);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Ret(COND_ARGS);
- inline void Ret(BranchDelaySlot bd) {
- Ret(al, zero_reg, Operand(zero_reg), bd);
+ inline void Ret(BranchDelaySlot bd, Condition cond = al,
+ Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+ Ret(cond, rs, rt, bd);
}
#undef COND_ARGS
@@ -197,6 +236,8 @@ class MacroAssembler: public Assembler {
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
}
+ void Move(FPURegister dst, double imm);
+
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
// CodeGenerator::ProcessDeferred().
@@ -206,6 +247,7 @@ class MacroAssembler: public Assembler {
Branch(L);
}
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -221,39 +263,127 @@ class MacroAssembler: public Assembler {
Condition cond, Register src1, const Operand& src2);
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc, // eq for new space, ne otherwise.
- Label* branch);
+ // ---------------------------------------------------------------------------
+ // GC Support
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
-
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the 'at' register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
+
+ void JumpIfBlack(Register object,
Register scratch0,
- Register scratch1);
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored. value and scratch registers are clobbered by the operation.
+ // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ ra_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
// ---------------------------------------------------------------------------
@@ -517,6 +647,14 @@ class MacroAssembler: public Assembler {
Addu(sp, sp, 2 * kPointerSize);
}
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ lw(src3, MemOperand(sp, 0 * kPointerSize));
+ lw(src2, MemOperand(sp, 1 * kPointerSize));
+ lw(src1, MemOperand(sp, 2 * kPointerSize));
+ Addu(sp, sp, 3 * kPointerSize);
+ }
+
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
@@ -535,10 +673,17 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+ // Does not handle errors.
+ void FlushICache(Register address, unsigned instructions);
+
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -547,6 +692,24 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Wrapper function for the different cmp/branch types.
+ void BranchF(Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2,
+ BranchDelaySlot bd = PROTECT);
+
+ // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+ inline void BranchF(BranchDelaySlot bd,
+ Label* target,
+ Label* nan,
+ Condition cc,
+ FPURegister cmp1,
+ FPURegister cmp2) {
+ BranchF(target, nan, cc, cmp1, cmp2, bd);
+ };
+
// Convert the HeapNumber pointed to by source to a 32bits signed integer
// dest. If the HeapNumber does not fit into a 32bits signed integer branch
// to not_int32 label. If FPU is available double_scratch is used but not
@@ -558,6 +721,18 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
+ // Truncates a double using a specific rounding mode.
+ // The except_flag will contain any exceptions caused by the instruction.
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
+ // exception is masked.
+ void EmitFPUTruncate(FPURoundingMode rounding_mode,
+ FPURegister result,
+ DoubleRegister double_input,
+ Register scratch1,
+ Register except_flag,
+ CheckForInexactConversion check_inexact
+ = kDontCheckForInexactConversion);
+
// Helper for EmitECMATruncate.
  // This will truncate a floating-point value outside of the signed 32-bit
  // integer range to a 32-bit signed integer.
@@ -579,15 +754,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register scratch3);
- // -------------------------------------------------------------------------
- // Activation frames.
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
@@ -614,6 +780,7 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
+
// -------------------------------------------------------------------------
// JavaScript invokes.
@@ -702,6 +869,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+  // Initialize fields with filler values.  Fields from |start_offset| up to,
+  // but not including, |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| equals |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
// -------------------------------------------------------------------------
// Support functions.
@@ -725,6 +899,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check to see if maybe_number can be stored as a double in
+ // FastDoubleElements. If it can, store it at the index specified by key in
+ // the FastDoubleElements array elements, otherwise jump to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -754,6 +953,21 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
+
+ // Load and check the instance type of an object for being a string.
+ // Loads the type into the second argument register.
+ // Returns a condition that will be enabled if the object was a string.
+ Condition IsObjectStringType(Register obj,
+ Register type,
+ Register result) {
+ lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ And(type, type, Operand(kIsNotStringMask));
+ ASSERT_EQ(0, kStringTag);
+ return eq;
+ }
+
+
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
@@ -879,6 +1093,9 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
// Before calling a C-function from generated code, align arguments on stack
// and add space for the four mips argument slots.
// After aligning the frame, non-register arguments must be stored on the
@@ -888,7 +1105,11 @@ class MacroAssembler: public Assembler {
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments,
+ Register scratch);
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
  // Arguments 5..n are stored on the stack using the following:
@@ -900,7 +1121,13 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function,
+ int num_reg_arguments,
+ int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// There are two ways of passing double arguments on MIPS, depending on
@@ -976,6 +1203,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities.
@@ -1003,6 +1233,13 @@ class MacroAssembler: public Assembler {
Addu(reg, reg, reg);
}
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+ void SmiTagCheckOverflow(Register reg, Register overflow) {
+ mov(overflow, reg); // Save original value.
+ addu(reg, reg, reg);
+ xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+ }
+
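Smi-tagging doubles the value, so it overflows exactly when 2 * value changes sign relative to value; the xor_ above makes that visible as a negative overflow register that BranchOnOverflow can test. The identity as a stand-alone check (unsigned arithmetic avoids undefined signed overflow):

    #include <cstdint>

    // True if smi-tagging (doubling) a 32-bit value would overflow.
    static bool SmiTagOverflows(int32_t value) {
      uint32_t doubled = static_cast<uint32_t>(value) * 2u;
      // Overflow iff (value ^ 2 * value) < 0, as in SmiTagCheckOverflow.
      return static_cast<int32_t>(static_cast<uint32_t>(value) ^ doubled) < 0;
    }
    // SmiTagOverflows(0x3FFFFFFF) == false, SmiTagOverflows(0x40000000) == true.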
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
}
@@ -1017,10 +1254,11 @@ class MacroAssembler: public Assembler {
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = at) {
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT) {
ASSERT_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
- Branch(smi_label, eq, scratch, Operand(zero_reg));
+ Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
// Jump if the register contains a non-smi.
@@ -1090,13 +1328,29 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* failure);
+ void ClampUint8(Register output_reg, Register input_reg);
+
+ void ClampDoubleToUint8(Register result_reg,
+ DoubleRegister input_reg,
+ DoubleRegister temp_double_reg);
+
+
void LoadInstanceDescriptors(Register map, Register descriptors);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
+ // Patch the relocated value (lui/ori pair).
+ void PatchRelocatedValue(Register li_location,
+ Register scratch,
+ Register new_value);
+
private:
void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
- int num_arguments);
+ int num_reg_arguments,
+ int num_double_arguments);
void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1132,25 +1386,33 @@ class MacroAssembler: public Assembler {
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds a one-bit mask for the object's mark bit.  Leaves
+  // addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
- bool UseAbsoluteCodePointers();
-
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@@ -1191,34 +1453,6 @@ class CodePatcher {
};
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
- ASSERT(index > kCArgSlotCount);
- // Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
- return MemOperand(sp, offset);
-}
-
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 63e836f22..9db5c5bed 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -377,9 +377,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address()));
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
@@ -607,6 +610,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+  // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
@@ -1244,13 +1253,14 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
// Stack is already aligned for call, so decrement by alignment
// to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment));
- __ sw(ra, MemOperand(sp, 0));
- __ mov(a0, sp);
+ __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+ const int return_address_offset = kCArgsSlotsSize;
+ __ Addu(a0, sp, return_address_offset);
+ __ sw(ra, MemOperand(a0, 0));
__ mov(t9, t1);
__ Call(t9);
- __ lw(ra, MemOperand(sp, 0));
- __ Addu(sp, sp, Operand(stack_alignment));
+ __ lw(ra, MemOperand(sp, return_address_offset));
+ __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
__ Jump(ra);
}
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 5b949734f..4bad0a2cc 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -432,7 +432,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ __ mov(name_reg, a0);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -445,7 +451,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ __ mov(name_reg, a0);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Return the value (register v0).
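The RecordWrite-to-RecordWriteField changes in GenerateStoreField above are driven by the incremental-marking write barrier: after a heap pointer is stored into an object, the slot may have to be recorded for the garbage collector. The sketch below shows only the general shape of such a barrier; the types and checks are invented for illustration and do not mirror V8's heap.

  // Conceptual store-with-write-barrier. Illustrative only.
  #include <unordered_set>

  struct HeapObject {
    bool in_old_space;
    bool marked_black;   // already scanned by the incremental marker
  };

  struct Heap {
    std::unordered_set<HeapObject**> remembered_slots;
    bool incremental_marking = true;

    void StoreField(HeapObject* host, HeapObject** slot, HeapObject* value) {
      *slot = value;                       // the store itself
      if (value == nullptr) return;        // smis need no barrier
      // Old-to-new pointer: remember the slot for the next scavenge.
      if (host->in_old_space && !value->in_old_space) {
        remembered_slots.insert(slot);
      }
      // Black-to-white pointer during marking: record the slot so the
      // collector does not miss the newly reachable object.
      if (incremental_marking && host->marked_black && !value->marked_black) {
        remembered_slots.insert(slot);
      }
    }
  };

  int main() {
    Heap heap;
    HeapObject old_obj = {true, true};
    HeapObject young_obj = {false, false};
    HeapObject* field = nullptr;
    heap.StoreField(&old_obj, &field, &young_obj);
    return 0;
  }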
@@ -554,9 +566,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(
+ MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -595,6 +608,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
const int kApiStackSpace = 4;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -626,6 +640,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
+ AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
@@ -804,7 +819,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
@@ -822,7 +837,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -831,19 +847,20 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
+ __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ }
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
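Several hunks above replace explicit EnterInternalFrame/LeaveInternalFrame pairs with a scoped FrameScope object, so the frame teardown can no longer be forgotten on an early exit path. The RAII pattern involved looks roughly like the sketch below; it is a stand-in, not V8's FrameScope.

  // Minimal RAII stand-in for the scoped-frame pattern.
  #include <cstdio>

  class Assembler {
   public:
    void EnterFrame() { std::puts("  ; push ra/fp, set up frame"); }
    void LeaveFrame() { std::puts("  ; restore ra/fp, tear down frame"); }
  };

  class ScopedFrame {
   public:
    explicit ScopedFrame(Assembler* masm) : masm_(masm) { masm_->EnterFrame(); }
    ~ScopedFrame() { masm_->LeaveFrame(); }   // emitted at every scope exit

   private:
    Assembler* masm_;
  };

  int main() {
    Assembler masm;
    {
      ScopedFrame scope(&masm);
      std::puts("  ; ... code that needs an internal frame ...");
    }  // frame teardown emitted here
    return 0;
  }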
@@ -1256,7 +1273,9 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
+
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
@@ -1317,40 +1336,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
- __ LeaveInternalFrame();
- __ Ret();
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder have been compiled before (see a caller
+ // of this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
@@ -1580,7 +1601,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements;
// Get the array's length into v0 and calculate new length.
__ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1594,29 +1615,51 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if we could survive without allocation.
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+ // Check if value is a smi.
+ Label with_write_barrier;
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(t0, &with_write_barrier);
+
// Save new length.
__ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(end_elements, elements, end_elements);
const int kEndElementsOffset =
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
- __ Addu(end_elements, end_elements, kPointerSize);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
// Check for a smi.
- __ JumpIfNotSmi(t0, &with_write_barrier);
- __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
- __ InNewSpace(elements, t0, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, t0);
+
+ __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastSmiOnlyElements(t2, t2, &call_builtin);
+
+ // Save new length.
+ __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(end_elements, elements, end_elements);
+ __ Addu(end_elements, end_elements, kEndElementsOffset);
+ __ sw(t0, MemOperand(end_elements));
+
+ __ RecordWrite(elements,
+ end_elements,
+ t0,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
@@ -1628,6 +1671,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Branch(&call_builtin);
}
+ __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(a2, &no_fast_elements_check);
+ __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(t3, t3, &call_builtin);
+ __ bind(&no_fast_elements_check);
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(
masm()->isolate());
@@ -1653,8 +1705,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ sw(t2, MemOperand(t3));
// Push the argument.
- __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
- __ sw(t2, MemOperand(end_elements));
+ __ sw(a2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -2551,7 +2602,12 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
- UNIMPLEMENTED_MIPS();
+ // TODO(kasperl): For now, we always call indirectly through the
+ // code field in the function to allow recompilation to take effect
+ // without changing any of the call sites.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+ NullCallWrapper(), call_kind);
} else {
__ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
JUMP_FUNCTION, call_kind);
@@ -2718,6 +2774,16 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
+
+ // This trashes a0 but the value is returned in v0 anyway.
+ __ RecordWriteField(t0,
+ JSGlobalPropertyCell::kValueOffset,
+ a0,
+ a2,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
__ Ret();
@@ -3116,7 +3182,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3210,9 +3276,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
MapList* receiver_maps,
- CodeList* handler_ics) {
+ CodeList* handler_stubs,
+ MapList* transitioned_maps) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3225,10 +3292,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
int receiver_count = receiver_maps->length();
__ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
- __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ for (int i = 0; i < receiver_count; ++i) {
+ Handle<Map> map(receiver_maps->at(i));
+ Handle<Code> code(handler_stubs->at(i));
+ if (transitioned_maps->at(i) == NULL) {
+ __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+ } else {
+ Label next_map;
+ __ Branch(&next_map, ne, a3, Operand(map));
+ __ li(t0, Operand(Handle<Map>(transitioned_maps->at(i))));
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
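The CompileStorePolymorphic loop above dispatches on the receiver map and, when an elements-kind transition is attached, loads the target map before jumping to the shared handler. In plain C++ the control flow is roughly the following; the types are invented for illustration.

  // Shape of the polymorphic keyed-store dispatch. Illustrative only.
  #include <cstddef>
  #include <vector>

  struct Map {};
  typedef void (*Handler)(const Map* transition_target);

  struct PolymorphicEntry {
    const Map* map;
    Handler handler;
    const Map* transitioned_map;   // NULL when no transition is needed
  };

  static void Dispatch(const Map* receiver_map,
                       const std::vector<PolymorphicEntry>& entries,
                       Handler miss) {
    for (std::size_t i = 0; i < entries.size(); ++i) {
      if (receiver_map != entries[i].map) continue;   // try the next map
      // Match: hand the (possibly NULL) transition target to the handler.
      entries[i].handler(entries[i].transitioned_map);
      return;
    }
    miss(NULL);   // no map matched: fall through to the miss stub
  }

  static void StoreFast(const Map*) {}
  static void Miss(const Map*) {}

  int main() {
    Map fast_map;
    PolymorphicEntry entry = { &fast_map, &StoreFast, NULL };
    std::vector<PolymorphicEntry> entries(1, entry);
    Dispatch(&fast_map, entries, &Miss);
    return 0;
  }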
@@ -3457,6 +3532,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@@ -3553,6 +3629,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3828,7 +3905,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the index is in range.
- __ SmiUntag(t0, key);
__ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3836,7 +3912,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// a3: external array.
- // t0: key (integer).
if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
@@ -3848,7 +3923,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
- // t0: key (integer).
// t1: value (integer).
switch (elements_kind) {
@@ -3865,33 +3939,36 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ mov(v0, t1); // Value is in range 0..255.
__ bind(&done);
__ mov(t1, v0);
- __ addu(t8, a3, t0);
+
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t1, MemOperand(t8, 0));
}
break;
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t1, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t1, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t1, MemOperand(t8, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
+ __ SmiUntag(t0, key);
StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t8, t0, 3);
+ __ sll(t8, key, 2);
__ addu(a3, a3, t8);
// a3: effective address of the double element
FloatingPointHelper::Destination destination;
@@ -3913,6 +3990,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3921,12 +3999,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
// Entry registers are intact, a0 holds the value which is the return value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
// a3: external array.
- // t0: index (integer).
__ bind(&check_heap_number);
__ GetObjectType(value, t1, t2);
__ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3934,7 +4011,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage.
- // t0: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
@@ -3947,11 +4023,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(f0, f0);
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ swc1(f0, MemOperand(t8, 0));
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, t0, 3);
+ __ sll(t8, key, 2);
__ addu(t8, a3, t8);
__ sdc1(f0, MemOperand(t8, 0));
} else {
@@ -3960,18 +4036,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
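The shift-amount changes in the external-array stores above follow from keeping the key in smi form (index << 1 on a 32-bit build, an assumption of this sketch) instead of untagging it first: each element size then needs one less left shift, and byte-sized elements need a right shift. The arithmetic:

  // Smi-tagged key -> byte offset, for each external-array element size.
  #include <cassert>

  static const int kSmiTagSize = 1;   // assumed 1-bit smi tag (32-bit V8)

  static int OffsetFromSmiKey(int smi_key, int element_size_log2) {
    int index = smi_key >> kSmiTagSize;    // untag
    return index << element_size_log2;     // scale by the element size
  }

  int main() {
    const int key = 7 << kSmiTagSize;                  // smi for index 7
    assert(OffsetFromSmiKey(key, 0) == (key >> 1));    // bytes:     srl key, 1
    assert(OffsetFromSmiKey(key, 1) == key);           // halfwords: key as-is
    assert(OffsetFromSmiKey(key, 2) == (key << 1));    // words:     sll key, 1
    assert(OffsetFromSmiKey(key, 3) == (key << 2));    // doubles:   sll key, 2
    return 0;
  }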
@@ -3979,6 +4055,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3989,7 +4066,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// Entry registers are intact, a0 holds the value
// which is the return value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
} else {
// FPU is not available, do manual conversions.
@@ -4044,13 +4121,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ or_(t3, t7, t6);
__ bind(&done);
- __ sll(t9, a1, 2);
+ __ sll(t9, key, 1);
__ addu(t9, a2, t9);
__ sw(t3, MemOperand(t9, 0));
// Entry registers are intact, a0 holds the value which is the return
// value.
- __ mov(v0, value);
+ __ mov(v0, a0);
__ Ret();
__ bind(&nan_or_infinity_or_zero);
@@ -4068,6 +4145,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// t8: effective address of destination element.
__ sw(t4, MemOperand(t8, 0));
__ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+ __ mov(v0, a0);
__ Ret();
} else {
bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4130,18 +4208,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ addu(t8, a3, t0);
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
__ sb(t3, MemOperand(t8, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ sll(t8, t0, 1);
- __ addu(t8, a3, t8);
+ __ addu(t8, a3, key);
__ sh(t3, MemOperand(t8, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, t0, 2);
+ __ sll(t8, key, 1);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
@@ -4149,6 +4227,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4298,8 +4377,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -4308,7 +4389,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// -- a3 : scratch
// -- a4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = a0;
Register key_reg = a1;
@@ -4342,14 +4423,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// Compare smis.
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
- __ Addu(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(scratch3, scratch2, scratch);
- __ sw(value_reg, MemOperand(scratch3));
- __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg);
-
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ __ Addu(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch2);
+ __ sw(value_reg, MemOperand(scratch));
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ Addu(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch2);
+ __ sw(value_reg, MemOperand(scratch));
+ __ mov(receiver_reg, value_reg);
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ RecordWrite(elements_reg, // Object.
+ scratch, // Address.
+ receiver_reg, // Value.
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
// value_reg (a0) is preserved.
// Done.
__ Ret();
@@ -4358,6 +4457,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
@@ -4375,15 +4478,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = a0;
Register key_reg = a1;
Register receiver_reg = a2;
- Register scratch = a3;
- Register elements_reg = t0;
- Register mantissa_reg = t1;
- Register exponent_reg = t2;
+ Register elements_reg = a3;
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
Register scratch4 = t3;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4395,90 +4498,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ lw(scratch,
+ __ lw(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
- // Handle smi values specially.
- __ JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- __ CheckMap(value_reg,
- scratch,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
-
- __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- __ bind(&have_double_value);
- __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(scratch, elements_reg, Operand(scratch4));
- __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ sw(exponent_reg, FieldMemOperand(scratch, offset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, value_reg); // In delay slot.
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
- __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-
- __ bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- __ jmp(&have_double_value);
-
- __ bind(&smi_value);
- __ Addu(scratch, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(scratch, scratch, scratch4);
- // scratch is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ receiver_reg,
+ elements_reg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ &transition_elements_kind);
- Register untagged_value = receiver_reg;
- __ SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(
- masm,
- untagged_value,
- destination,
- f0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- f2);
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatures::Scope scope(FPU);
- __ sdc1(f0, MemOperand(scratch, 0));
- } else {
- __ sw(mantissa_reg, MemOperand(scratch, 0));
- __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
- }
__ Ret(USE_DELAY_SLOT);
__ mov(v0, value_reg); // In delay slot.
@@ -4487,6 +4525,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
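GenerateStoreFastDoubleElement now delegates the smi/heap-number/NaN handling it used to open-code to StoreNumberToDoubleElements. Conceptually that helper converts smis to doubles and canonicalizes NaNs so a stored NaN can never collide with the array's hole sentinel; the sketch below shows that idea only, with an assumed quiet-NaN bit pattern rather than V8's constants.

  // Conceptual double-array store with NaN canonicalization. Illustrative.
  #include <cmath>
  #include <cstddef>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  static double CanonicalNaN() {
    const uint64_t kQuietNaNBits = 0x7FF8000000000000ULL;   // assumption
    double d;
    std::memcpy(&d, &kQuietNaNBits, sizeof d);
    return d;
  }

  static void StoreNumberToDoubleElements(std::vector<double>* elements,
                                          std::size_t index,
                                          bool value_is_smi,
                                          double number) {
    // Smis convert directly; heap-number NaNs are replaced by one canonical
    // bit pattern so they cannot alias the hole marker used by the array.
    if (!value_is_smi && std::isnan(number)) number = CanonicalNaN();
    (*elements)[index] = number;
  }

  int main() {
    std::vector<double> elements(4, 0.0);
    StoreNumberToDoubleElements(&elements, 0, true, 42.0);           // smi
    StoreNumberToDoubleElements(&elements, 1, false, std::nan(""));  // NaN
    return 0;
  }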