author    Ryan Dahl <ry@tinyclouds.org>  2011-10-13 17:45:02 -0700
committer Ryan Dahl <ry@tinyclouds.org>  2011-10-13 17:45:02 -0700
commit    33b5f2f7799081eafe04df3278aad40fd4ae3b55 (patch)
tree      46e2840438240411375d3f12f5172c42aa571f95 /deps/v8/src/arm
parent    59a5262041dce0760b1f960a900eca8b8ca1138f (diff)
download  node-33b5f2f7799081eafe04df3278aad40fd4ae3b55.tar.gz
Upgrade V8 to 3.7.0
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h              24
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc                 12
-rw-r--r--  deps/v8/src/arm/assembler-arm.h                  10
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                1118
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc               598
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h                245
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                    8
-rw-r--r--  deps/v8/src/arm/codegen-arm.h                    10
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                     82
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc               34
-rw-r--r--  deps/v8/src/arm/frames-arm.h                     10
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc             231
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                       149
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                   38
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                    16
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc          182
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h             7
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc          566
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h           226
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc    15
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc                  2
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc               357
22 files changed, 2727 insertions, 1213 deletions
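Two changes recur throughout the hunks below: explicit EnterInternalFrame()/LeaveInternalFrame() pairs become a block-scoped FrameScope whose destructor tears the frame down, and the RelocInfo setters gain incremental-marking write barriers for the new GC machinery in V8 3.7.0. A minimal sketch of the RAII idea behind FrameScope follows; the ScopedFrame guard is illustrative only, not the actual V8 class.

    // Illustrative sketch (not V8's real FrameScope): frame setup is emitted
    // when the guard is constructed and teardown when it goes out of scope,
    // which is why the hunks below wrap the old Enter/Leave pairs in braces.
    class ScopedFrame {
     public:
      explicit ScopedFrame(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterInternalFrame();   // emitted once at scope entry
      }
      ~ScopedFrame() {
        masm_->LeaveInternalFrame();   // emitted automatically at scope exit
      }
     private:
      MacroAssembler* masm_;
    };

Because teardown now happens in a destructor, each call site in the diff introduces a brace-delimited block around the frame's body instead of calling LeaveInternalFrame() by hand.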
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3e19a4538..93cecf52b 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -77,6 +77,11 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
+ if (host() != NULL && IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -101,6 +106,10 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ if (host() != NULL && target->IsHeapObject()) {
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ }
}
@@ -131,6 +140,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ if (host() != NULL) {
+ // TODO(1550) We are passing NULL as a slot because cell can never be on
+ // evacuation candidate.
+ host()->GetHeap()->incremental_marking()->RecordWrite(
+ host(), NULL, cell);
+ }
}
@@ -147,6 +162,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ if (host() != NULL) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
}
@@ -195,7 +215,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitPointer(target_object_address());
+ visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -221,7 +241,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitPointer(heap, target_object_address());
+ StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
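The additions to assembler-arm-inl.h above all follow one shape: perform the raw patch of the code, then, if there is a host Code object, record the write with the incremental marker so concurrent marking does not miss the new reference. A condensed sketch of that shape, using only calls that appear in the hunk (the helper name itself is hypothetical):

    // Hypothetical helper showing the store-then-record-write pattern used by
    // the RelocInfo setters above.
    void PatchAndRecordWrite(Code* host, Address pc, Object* target) {
      Assembler::set_target_address_at(pc, reinterpret_cast<Address>(target));
      if (host != NULL && target->IsHeapObject()) {
        host->GetHeap()->incremental_marking()->RecordWrite(
            host, &Memory::Object_at(pc), HeapObject::cast(target));
      }
    }

For code targets the setters call RecordWriteIntoCode() instead, and for global property cells the slot is passed as NULL because a cell can never be an evacuation candidate (see the TODO(1550) comment above).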
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 0ec36921a..329493a34 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
- ASSERT(!initialized_);
+ unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+ CpuFeaturesImpliedByCompiler());
+ ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@@ -86,8 +88,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also alowed for generated code in the
// snapshot.
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- supported_ |= CpuFeaturesImpliedByCompiler();
+ supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 9a586936f..d19b64da5 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
-
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 60d2081c2..32b7896a5 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -582,10 +582,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ }
__ pop(function);
__ mov(argument, r0);
__ b(&argument_is_string);
@@ -601,10 +602,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- __ EnterInternalFrame();
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(argument);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ }
__ Ret();
}
@@ -617,12 +619,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label non_function_call;
+ Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_call);
+ __ b(ne, &slow);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -631,10 +633,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
+ // r2: object type
+ Label do_call;
+ __ bind(&slow);
+ __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
__ bind(&non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -650,321 +661,329 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
// Enter a construct frame.
- __ EnterConstructFrame();
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of the
- // preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the two incoming parameters on the stack.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0); // Smi-tagged arguments count.
+ __ push(r1); // Constructor function.
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(isolate);
+ __ mov(r2, Operand(debug_step_in_fp));
+ __ ldr(r2, MemOperand(r2));
+ __ tst(r2, r2);
+ __ b(ne, &rt_call);
#endif
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
+ // Load the initial map and verify that it is in fact a map.
+ // r1: constructor function
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
- // Check that the constructor is not constructing a JSFunction (see comments
- // in Runtime_NewObject in runtime.cc). In which case the initial map's
- // instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+ if (count_constructions) {
+ Label allocate;
+ // Decrease generous allocation count.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ MemOperand constructor_count =
+ FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+ __ ldrb(r4, constructor_count);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ strb(r4, constructor_count);
+ __ b(ne, &allocate);
+
+ __ Push(r1, r2);
+
+ __ push(r1); // constructor
+ // The call will replace the stub, so the countdown is only done once.
+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+ __ pop(r2);
+ __ pop(r1);
+
+ __ bind(&allocate);
+ }
- // Allocated the JSObject, now initialize the fields. Map is set to initial
- // map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- { Label loop, entry;
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size
+ // r4: JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r1: constructor function
+ // r2: initial map
+ // r3: object size (in words)
+ // r4: JSObject (not tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+ // r0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r0, r6);
+ __ Assert(le, "Unexpected number of pre-allocated property fields.");
+ }
+ __ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- } else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r5, r6);
- __ b(lt, &loop);
- }
+ __ InitializeFieldsWithFiller(r5, r6, r7);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on. Any
+ // failures need to undo the allocation, so that the heap is in a
+ // consistent state and verifiable.
+ __ add(r4, r4, Operand(kHeapObjectTag));
+
+ // Check if a non-empty properties array is needed. Continue with
+ // allocated object if not fall through to runtime call if it is.
+ // r1: constructor function
+ // r4: JSObject
+ // r5: start of next object (not tagged)
+ __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+ // The field instance sizes contains both pre-allocated property fields
+ // and in-object properties.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sub(r3, r3, Operand(r6), SetCC);
+
+ // Done if no extra properties are to be allocated.
+ __ b(eq, &allocated);
+ __ Assert(pl, "Property allocation count failed.");
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: start of next object
+ __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ AllocateInNewSpace(
+ r0,
+ r5,
+ r6,
+ r2,
+ &undo_allocation,
+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+ // Initialize the FixedArray.
+ // r1: constructor
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+ __ mov(r2, r5);
+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+ __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+ // Initialize the fields to undefined.
+ // r1: constructor function
+ // r2: First element of FixedArray (not tagged)
+ // r3: number of elements in properties array
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+ { Label loop, entry;
+ if (count_constructions) {
+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ } else if (FLAG_debug_code) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ cmp(r7, r8);
+ __ Assert(eq, "Undefined value not loaded.");
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ bind(&entry);
+ __ cmp(r2, r6);
+ __ b(lt, &loop);
+ }
- // Add the object tag to make the JSObject real, so that we can continue and
- // jump into the continuation code at any time from now on. Any failures
- // need to undo the allocation, so that the heap is in a consistent state
- // and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // r1: constructor function
+ // r4: JSObject
+ // r5: FixedArray (not tagged)
+ __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
+ __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated objects unused properties.
+ // r4: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ UndoAllocationInNewSpace(r4, r5);
+ }
- // Check if a non-empty properties array is needed. Continue with allocated
- // object if not fall through to runtime call if it is.
+ // Allocate the new receiver object using the runtime call.
// r1: constructor function
+ __ bind(&rt_call);
+ __ push(r1); // argument for Runtime_NewObject
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ mov(r4, r0);
+
+ // Receiver for constructor call allocated.
// r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The field instance sizes contains both pre-allocated property fields and
- // in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
+ __ bind(&allocated);
+ __ push(r4);
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ push(r1); // Constructor function.
+ __ push(r4); // Receiver.
+
+ // Reload the number of arguments from the stack.
// r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+ // Setup pointer to last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below
+ __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r2: address of last argument (caller sp)
// r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+ // r3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r3, r3, Operand(2), SetCC);
+ __ b(ge, &loop);
- // Continue with JSObject being successfully allocated
+ // Call the function.
+ // r0: number of arguments
// r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated objects unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
-
- // Push the function and the allocated receiver from the stack.
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, kPointerSize));
- __ push(r1); // Constructor function.
- __ push(r4); // Receiver.
-
- // Reload the number of arguments from the stack.
- // r1: constructor function
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
- // Setup pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Setup number of arguments for function call below
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code =
+ masm->isolate()->builtins()->HandleApiCallConstruct();
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r2: address of last argument (caller sp)
- // r1: constructor function
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
+ // Pop the function from the stack.
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ pop();
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+ // Leave construct frame.
}
- // Pop the function from the stack.
- // sp[0]: constructor function
- // sp[2]: receiver
- // sp[3]: constructor function
- // sp[4]: number of arguments (smi-tagged)
- __ pop();
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
@@ -997,63 +1016,64 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: argv
// r5-r7, cp may be clobbered
- // Clear the context before we push it when entering the JS frame.
+ // Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_address =
- ExternalReference::roots_address(masm->isolate());
- __ mov(r10, Operand(roots_address));
+ // Set up the roots register.
+ ExternalReference roots_address =
+ ExternalReference::roots_address(masm->isolate());
+ __ mov(r10, Operand(roots_address));
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
+ // Push the function and the receiver onto the stack.
+ __ push(r1);
+ __ push(r2);
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
+ // Copy arguments to the stack in a loop.
+ // r1: function
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r0, MemOperand(r0)); // dereference handle
+ __ push(r0); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, r2);
+ __ b(ne, &loop);
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1) {
+ __ mov(r9, Operand(r4));
+ }
- // Exit the JS frame and remove the parameters (except function), and return.
- // Respect ABI stack constraint.
- __ LeaveInternalFrame();
+ // Invoke the code and pass argc as r0.
+ __ mov(r0, Operand(r3));
+ if (is_construct) {
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+ }
+ // Exit the JS frame and remove the parameters (except function), and
+ // return.
+ // Respect ABI stack constraint.
+ }
__ Jump(lr);
// r0: result
@@ -1072,26 +1092,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1100,26 +1121,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyRecompile, 1);
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
+ // Tear down internal frame.
+ }
// Do a tail-call of the compiled function.
__ Jump(r2);
@@ -1128,12 +1150,13 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ EnterInternalFrame();
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Pass the function and deoptimization type to the runtime system.
+ __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r0);
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ }
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1173,9 +1196,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ EnterInternalFrame();
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ }
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
@@ -1191,10 +1215,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ }
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@@ -1276,17 +1301,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
- __ EnterInternalFrame(); // In order to preserve argument count.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
+ {
+ // Enter an internal frame in order to preserve argument count.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
+ __ push(r0);
+
+ __ push(r2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ mov(r2, r0);
+
+ __ pop(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+ // Exit the internal frame.
+ }
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ LeaveInternalFrame();
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
@@ -1406,156 +1437,157 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+ // here which will cause r2 to become negative.
+ __ sub(r2, sp, r2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(gt, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ push(r1);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ // End of stack check.
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
- __ push(r1);
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(r0); // limit
+ __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ push(r1);
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
+ // Get the receiver.
+ __ ldr(r0, MemOperand(fp, kRecvOffset));
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
+ // Check that the function is a JS function (otherwise it must be a proxy).
+ Label push_receiver;
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &push_receiver);
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ // Change context eagerly to get the right global object if necessary.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r1.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
+ // Compute the receiver.
+ // Do not transform the receiver for strict mode functions.
+ Label call_to_object, use_global_receiver;
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
+ // Do not transform the receiver for strict mode functions.
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &push_receiver);
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
+ // Compute the receiver in non-strict mode.
+ __ JumpIfSmi(r0, &call_to_object);
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
+ // Check if the receiver is already a JavaScript object.
+ // r0: receiver
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &push_receiver);
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
+ // Convert the receiver to a regular object.
+ // r0: receiver
+ __ bind(&call_to_object);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ b(&push_receiver);
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // r0: receiver
+ __ bind(&push_receiver);
+ __ push(r0);
- // Tear down the internal frame and remove function, receiver and args.
- __ LeaveInternalFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, kArgsOffset));
+ __ push(r1);
+ __ push(r0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
+
+ // Use inline caching to access the arguments.
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, kIndexOffset));
- __ LeaveInternalFrame();
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, kLimitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // Invoke the function.
+ Label call_proxy;
+ ParameterCount actual(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &call_proxy);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
+
+ frame_scope.GenerateLeaveFrame();
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Jump(lr);
+
+ // Invoke the function proxy.
+ __ bind(&call_proxy);
+ __ push(r1); // add function proxy as last argument
+ __ add(r0, r0, Operand(1));
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+ __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ }
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index e65f6d9b6..44923a184 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -189,6 +189,72 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
+void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: function.
+ // [sp + kPointerSize]: serialized scope info
+
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ r0, r1, r2, &gc, TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0));
+
+ // Load the serialized scope info from the stack.
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+
+ // If this block context is nested in the global context we get a smi
+ // sentinel instead of a function. The block context should get the
+ // canonical empty function of the global context as its closure which
+ // we still have to look up.
+ Label after_sentinel;
+ __ JumpIfNotSmi(r3, &after_sentinel);
+ if (FLAG_debug_code) {
+ const char* message = "Expected 0 as a Smi sentinel";
+ __ cmp(r3, Operand::Zero());
+ __ Assert(eq, message);
+ }
+ __ ldr(r3, GlobalObjectOperand());
+ __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+ __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
+ __ bind(&after_sentinel);
+
+ // Setup the fixed slots.
+ __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
+ __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
+
+ // Copy the global object from the previous context.
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+
+ // Initialize the rest of the slots to the hole value.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
+ for (int i = 0; i < slots_; i++) {
+ __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
+}
+
+
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@@ -838,9 +904,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
- // Call C routine that may not cause GC or other trouble.
- __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
- 0, 2);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@@ -857,6 +925,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+ // These variants are compiled ahead of time. See next method.
+ if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+ return true;
+ }
+ if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+ return true;
+ }
+ // Other register combinations are generated as and when they are needed,
+ // so it is unsafe to call them from stubs (we can't generate a stub while
+ // we are generating a stub).
+ return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+ WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+ WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+ stub1.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -1197,6 +1288,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
+
+ AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@@ -1214,7 +1307,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1699,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@@ -1713,6 +1808,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ __ stm(db_w, sp, kCallerSaved | lr.bit());
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+ const int argument_count = 1;
+ const int fp_argument_count = 0;
+ const Register scratch = r1;
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+ __ mov(r0, Operand(ExternalReference::isolate_address()));
+ __ CallCFunction(
+ ExternalReference::store_buffer_overflow_function(masm->isolate()),
+ argument_count);
+ if (save_doubles_ == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ }
+ __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
+}
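For reference, the scratch area carved out above is kDoubleSize * DwVfpRegister::kNumRegisters bytes; assuming the usual ARM layout of 16 double registers and 8-byte doubles, that is 128 bytes, which could be stated as a compile-time check:

    // Sketch only; the 16-register / 8-byte figures are assumptions, not values
    // taken from this patch.
    STATIC_ASSERT(kDoubleSize * DwVfpRegister::kNumRegisters == 8 * 16);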
+
+
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@@ -1866,12 +1996,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ }
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +2043,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
- __ EnterInternalFrame();
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
+ }
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@@ -2028,6 +2160,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // the stub generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@@ -3133,10 +3269,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ EnterInternalFrame();
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r0);
+ __ CallRuntime(RuntimeFunction(), 1);
+ }
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@@ -3149,14 +3286,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
- __ EnterInternalFrame();
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Allocate an aligned object larger than a HeapNumber.
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+ __ mov(scratch0, Operand(4 * kPointerSize));
+ __ push(scratch0);
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ }
__ Ret();
}
}
@@ -3173,6 +3311,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
+ AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3268,11 +3407,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_int_function(masm->isolate()),
+ 1, 1);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3298,11 +3440,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ }
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@@ -3319,6 +3464,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
+bool CEntryStub::IsPregenerated() {
+ return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+ result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+ CEntryStub::GenerateAheadOfTime();
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ Handle<Code> code = save_doubles.GetCode();
+ code->set_is_pregenerated(true);
+ StoreBufferOverflowStub stub(kSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+ CEntryStub stub(1, kDontSaveFPRegs);
+ Handle<Code> code = stub.GetCode();
+ code->set_is_pregenerated(true);
+}
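Taken together, these helpers split pregeneration into two phases: GenerateStubsAheadOfTime() bakes the kDontSaveFPRegs variants, while GenerateFPStubs() later produces the kSaveFPRegs variants and sets the isolate flag that the IsPregenerated() predicates consult. A rough sketch of the intended sequencing (the call sites are assumptions, not shown in this diff):

    CodeStub::GenerateStubsAheadOfTime();  // kDontSaveFPRegs variants only.
    // ... later, once FP-saving stubs are wanted:
    CodeStub::GenerateFPStubs();           // kSaveFPRegs variants; also calls
                                           // set_fp_stubs_generated(true).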
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@@ -3430,8 +3606,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r3, MemOperand(ip));
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@@ -3469,6 +3644,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@@ -3613,8 +3789,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r5, MemOperand(ip));
+ __ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@@ -3851,10 +4026,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
- __ EnterInternalFrame();
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+ }
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4250,10 +4426,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
- if (!FLAG_regexp_entry_native) {
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
- return;
- }
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@@ -4480,8 +4652,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4703,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
- __ ldr(r1, MemOperand(r1, 0));
+ __ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4745,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
- __ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+ __ mov(r2, subject);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastSubjectOffset,
+ r2,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
- __ mov(r3, last_match_info_elements);
- __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpImpl::kLastInputOffset,
+ subject,
+ r7,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -4712,6 +4891,22 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
+void CallFunctionStub::FinishCode(Code* code) {
+ code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+ UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+ UNREACHABLE();
+ return NULL;
+}
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function;
@@ -4889,23 +5084,26 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ cmp(result_, Operand(ip));
__ b(ne, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ add(scratch_, scratch_, result_);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(ne, &call_runtime_);
+ // Actually fetch the parent string if it is confirmed to be sequential.
+ STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
+ __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -6425,12 +6623,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- __ EnterInternalFrame();
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r0);
+ __ mov(ip, Operand(Smi::FromInt(op_)));
+ __ push(ip);
+ __ CallExternalReference(miss, 3);
+ }
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -6613,6 +6812,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+ // This stub overrides SometimesSetsUpAFrame() to return false. That means
+ // we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@@ -6702,6 +6903,267 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
+struct AheadOfTimeWriteBarrierStubList {
+ Register object, value, address;
+ RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+ // Used in RegExpExecStub.
+ { r6, r4, r7, EMIT_REMEMBERED_SET },
+ { r6, r2, r7, EMIT_REMEMBERED_SET },
+ // Used in CompileArrayPushCall.
+ // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+ // Also used in KeyedStoreIC::GenerateGeneric.
+ { r3, r4, r5, EMIT_REMEMBERED_SET },
+ // Used in CompileStoreGlobal.
+ { r4, r1, r2, OMIT_REMEMBERED_SET },
+ // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r1, r2, r3, EMIT_REMEMBERED_SET },
+ { r3, r2, r1, EMIT_REMEMBERED_SET },
+ // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+ { r2, r1, r3, EMIT_REMEMBERED_SET },
+ { r3, r1, r2, EMIT_REMEMBERED_SET },
+ // KeyedStoreStubCompiler::GenerateStoreFastElement.
+ { r4, r2, r3, EMIT_REMEMBERED_SET },
+ // Null termination.
+ { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
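The table is null-terminated and read by both IsPregenerated() and GenerateFixedRegStubsAheadOfTime() below, so a new call site that needs a pregenerated write barrier has to add its register triple ahead of the terminator, for example (a hypothetical entry, not part of this patch):

    // Hypothetical: a stub writing value r0 into object r5 through address r6.
    { r5, r0, r6, EMIT_REMEMBERED_SET },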
+
+
+bool RecordWriteStub::IsPregenerated() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ if (object_.is(entry->object) &&
+ value_.is(entry->value) &&
+ address_.is(entry->address) &&
+ remembered_set_action_ == entry->action &&
+ save_fp_regs_mode_ == kDontSaveFPRegs) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+ return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+ StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+ stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+ for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+ !entry->object.is(no_reg);
+ entry++) {
+ RecordWriteStub stub(entry->object,
+ entry->value,
+ entry->address,
+ entry->action,
+ kDontSaveFPRegs);
+ stub.GetCode()->set_is_pregenerated(true);
+ }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer to
+// the value has just been written into the object; now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a compare instruction (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ // See RecordWriteStub::Patch for details.
+ __ b(&skip_to_incremental_noncompacting);
+ __ b(&skip_to_incremental_compacting);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ }
+ __ Ret();
+
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+ // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+ ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+ PatchBranchIntoNop(masm, 0);
+ PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
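The two instructions emitted at the top of the stub are the ones RecordWriteStub::Patch() (declared later in this diff) rewrites when incremental marking starts or stops, so from the collector's side the lifetime of an installed stub looks roughly like this (stub_code is a hypothetical Code*):

    ASSERT(RecordWriteStub::GetMode(stub_code) ==
           RecordWriteStub::STORE_BUFFER_ONLY);
    RecordWriteStub::Patch(stub_code, RecordWriteStub::INCREMENTAL);
    // ... incremental marking finishes ...
    RecordWriteStub::Patch(stub_code, RecordWriteStub::STORE_BUFFER_ONLY);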
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+ InformIncrementalMarker(masm, mode);
+ regs_.Restore(masm);
+ __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count, regs_.scratch0());
+ Register address =
+ r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+ ASSERT(!address.is(regs_.object()));
+ ASSERT(!address.is(r0));
+ __ Move(address, regs_.address());
+ __ Move(r0, regs_.object());
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(r1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ ldr(r1, MemOperand(address, 0));
+ }
+ __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode) {
+ Label on_black;
+ Label need_incremental;
+ Label need_incremental_pop_scratch;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&on_black);
+
+ // Get the value from the slot.
+ __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ eq,
+ &ensure_not_white);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+ eq,
+ &need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
+ // We need extra registers for this, so we push the object and the address
+ // registers temporarily.
+ __ Push(regs_.object(), regs_.address());
+ __ EnsureNotWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(object_,
+ address_,
+ value_,
+ save_fp_regs_mode_,
+ MacroAssembler::kReturnAtEnd);
+ } else {
+ __ Ret();
+ }
+
+ __ bind(&need_incremental_pop_scratch);
+ __ Pop(regs_.object(), regs_.address());
+
+ __ bind(&need_incremental);
+
+ // Fall through when we need to inform the incremental marker.
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 557f7e6d4..3ba75bab1 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub {
};
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+ explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+ : save_doubles_(save_fp) { }
+
+ void Generate(MacroAssembler* masm);
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+ SaveFPRegsMode save_doubles_;
+
+ Major MajorKey() { return StoreBufferOverflow; }
+ int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -323,6 +342,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
+ bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+
private:
Register the_int_;
Register the_heap_number_;
@@ -371,6 +393,225 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub: public CodeStub {
+ public:
+ RecordWriteStub(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode)
+ : object_(object),
+ value_(value),
+ address_(address),
+ remembered_set_action_(remembered_set_action),
+ save_fp_regs_mode_(fp_mode),
+ regs_(object, // An input reg.
+ address, // An input reg.
+ value) { // One scratch reg.
+ }
+
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ virtual bool IsPregenerated();
+ static void GenerateFixedRegStubsAheadOfTime();
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
+ static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+ ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+ }
+
+ static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+ masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+ ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+ }
+
+ static Mode GetMode(Code* stub) {
+ Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+ Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+ Assembler::kInstrSize);
+
+ if (Assembler::IsBranch(first_instruction)) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+ if (Assembler::IsBranch(second_instruction)) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+ return STORE_BUFFER_ONLY;
+ }
+
+ static void Patch(Code* stub, Mode mode) {
+ MacroAssembler masm(NULL,
+ stub->instruction_start(),
+ stub->instruction_size());
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ PatchBranchIntoNop(&masm, 0);
+ PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, 0);
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
+ CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+ }
+
+ private:
+ // This is a helper class for freeing up 3 scratch registers. The input is
+ // two registers that must be preserved and one scratch register provided by
+ // the caller.
+ class RegisterAllocation {
+ public:
+ RegisterAllocation(Register object,
+ Register address,
+ Register scratch0)
+ : object_(object),
+ address_(address),
+ scratch0_(scratch0) {
+ ASSERT(!AreAliased(scratch0, object, address, no_reg));
+ scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+ }
+
+ void Save(MacroAssembler* masm) {
+ ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+ // We don't have to save scratch0_ because it was given to us as
+ // a scratch register.
+ masm->push(scratch1_);
+ }
+
+ void Restore(MacroAssembler* masm) {
+ masm->pop(scratch1_);
+ }
+
+ // If we have to call into C then we need to save and restore all caller-
+ // saved registers that were not already preserved. The scratch registers
+ // will be restored by other means so we don't bother pushing them here.
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ masm->sub(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ // Save all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ }
+ }
+
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+ SaveFPRegsMode mode) {
+ if (mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(VFP3);
+ // Restore all VFP registers except d0.
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ DwVfpRegister reg = DwVfpRegister::from_code(i);
+ masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+ }
+ masm->add(sp,
+ sp,
+ Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ }
+ masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ }
+
+ inline Register object() { return object_; }
+ inline Register address() { return address_; }
+ inline Register scratch0() { return scratch0_; }
+ inline Register scratch1() { return scratch1_; }
+
+ private:
+ Register object_;
+ Register address_;
+ Register scratch0_;
+ Register scratch1_;
+
+ Register GetRegThatIsNotOneOf(Register r1,
+ Register r2,
+ Register r3) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ Register candidate = Register::FromAllocationIndex(i);
+ if (candidate.is(r1)) continue;
+ if (candidate.is(r2)) continue;
+ if (candidate.is(r3)) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+ return no_reg;
+ }
+ friend class RecordWriteStub;
+ };
+
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ return ObjectBits::encode(object_.code()) |
+ ValueBits::encode(value_.code()) |
+ AddressBits::encode(address_.code()) |
+ RememberedSetActionBits::encode(remembered_set_action_) |
+ SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ }
+
+ bool MustBeInStubCache() {
+ // All stubs must be registered in the stub cache,
+ // otherwise IncrementalMarker would not be able to find
+ // and patch them.
+ return true;
+ }
+
+ void Activate(Code* code) {
+ code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+ }
+
+ class ObjectBits: public BitField<int, 0, 4> {};
+ class ValueBits: public BitField<int, 4, 4> {};
+ class AddressBits: public BitField<int, 8, 4> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+ Register object_;
+ Register value_;
+ Register address_;
+ RememberedSetAction remembered_set_action_;
+ SaveFPRegsMode save_fp_regs_mode_;
+ Label slow_;
+ RegisterAllocation regs_;
+};
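With the BitField widths declared above (4 bits each for the object, value and address register codes, then one bit each for the remembered-set action and the FP mode), the minor key is just those values packed into the low 14 bits. A standalone sketch of the packing, with the enum encodings assumed to be zero for EMIT_REMEMBERED_SET and kDontSaveFPRegs:

    static int RecordWriteMinorKeyFor(int object_code, int value_code,
                                      int address_code, int action_bit,
                                      int fp_mode_bit) {
      return object_code | (value_code << 4) | (address_code << 8) |
             (action_bit << 12) | (fp_mode_bit << 13);
    }
    // e.g. object r6, value r4, address r7 with the assumed zero encodings:
    // 6 | (4 << 4) | (7 << 8) == 0x746.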
+
+
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@@ -575,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
+ virtual bool SometimesSetsUpAFrame() { return false; }
+
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@@ -587,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
- Major MajorKey() { return StringDictionaryNegativeLookup; }
+ Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index bf748a9b6..3993ed02b 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -38,12 +38,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterInternalFrame();
+ masm->EnterFrame(StackFrame::INTERNAL);
+ ASSERT(!masm->has_frame());
+ masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveInternalFrame();
+ masm->LeaveFrame(StackFrame::INTERNAL);
+ ASSERT(masm->has_frame());
+ masm->set_has_frame(false);
}
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index d27982aba..1c0d508d2 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -69,16 +69,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- // Constants related to patching of inlined load/store.
- static int GetInlinedKeyedLoadInstructionsAfterPatch() {
- return FLAG_debug_code ? 32 : 13;
- }
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
- static int GetInlinedNamedStoreInstructionsAfterPatch() {
- ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
- return Isolate::Current()->inlined_write_barrier_size() + 4;
- }
-
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 07a22722c..b866f9cc8 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
- __ EnterInternalFrame();
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non object values
- // are stored as a smi causing it to be untouched by GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Store the registers containing live values on the expression stack to
+ // make sure that these are correctly updated during GC. Non-object values
+ // are stored as smis, causing them to be untouched by GC.
+ ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ ASSERT((object_regs & non_object_regs) == 0);
+ if ((object_regs | non_object_regs) != 0) {
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ if (FLAG_debug_code) {
+ __ tst(reg, Operand(0xc0000000));
+ __ Assert(eq, "Unable to encode value as smi");
+ }
+ __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
+ __ stm(db_w, sp, object_regs | non_object_regs);
}
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
- (((object_regs |non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
+ __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+ CEntryStub ceb(1);
+ __ CallStub(&ceb);
+
+ // Restore the register values from the expression stack.
+ if ((object_regs | non_object_regs) != 0) {
+ __ ldm(ia_w, sp, object_regs | non_object_regs);
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ Register reg = { r };
+ if ((non_object_regs & (1 << r)) != 0) {
+ __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+ }
+ if (FLAG_debug_code &&
+ (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+ __ mov(reg, Operand(kDebugZapValue));
+ }
}
}
- }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 00357f76d..bb03d740d 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -112,12 +112,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
#endif
+ Isolate* isolate = code->GetIsolate();
+
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
+ // We might be in the middle of incremental marking with compaction.
+ // Tell the collector to treat this code object in a special way and
+ // ignore all slots that might have been recorded on it.
+ isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@@ -134,7 +141,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+ Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@@ -169,6 +177,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ RelocInfo rinfo(pc_after - 2 * kInstrSize,
+ RelocInfo::CODE_TARGET,
+ 0,
+ unoptimized_code);
+ unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ unoptimized_code, &rinfo, replacement_code);
}
@@ -193,6 +208,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
+
+ check_code->GetHeap()->incremental_marking()->
+ RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
}
@@ -632,7 +650,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+ }
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@@ -686,8 +707,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm());
+ __ CallCFunction(
+ ExternalReference::compute_output_frames_function(isolate), 1);
+ }
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 26bbd82d0..c66ceee93 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -70,6 +70,16 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+static const RegList kCallerSaved =
+ 1 << 0 | // r0
+ 1 << 1 | // r1
+ 1 << 2 | // r2
+ 1 << 3 | // r3
+ 1 << 9; // r9
+
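The list above sets bits 0-3 and bit 9 of the register mask, so the constant works out to 0x20f; a hedged sanity check could read:

    STATIC_ASSERT(kCallerSaved == 0x20f);  // 0b1000001111; sketch only.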
+
static const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 50ed8b1da..353ce5b10 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -39,6 +39,7 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@@ -155,6 +156,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@@ -200,13 +206,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+
+ // Update the write barrier.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
}
@@ -264,7 +269,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
- EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
+ EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@@ -665,12 +670,15 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
+
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWrite(scratch0,
- Operand(Context::SlotOffset(var->index())),
- scratch1,
- src);
+ __ RecordWriteContextSlot(scratch0,
+ location.offset(),
+ src,
+ scratch1,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs);
}
}
@@ -703,7 +711,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
- Variable::Mode mode,
+ VariableMode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@@ -721,7 +729,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@@ -746,10 +754,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ mov(r1, Operand(cp));
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ __ RecordWriteContextSlot(cp,
+ offset,
+ result_register(),
+ r2,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@@ -762,10 +776,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of three modes.
- ASSERT(mode == Variable::VAR ||
- mode == Variable::CONST ||
- mode == Variable::LET);
- PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+ ASSERT(mode == VAR || mode == CONST || mode == LET);
+ PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@@ -775,7 +787,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
- } else if (mode == Variable::CONST || mode == Variable::LET) {
+ } else if (mode == CONST || mode == LET) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@@ -1205,15 +1217,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
- if (var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
- } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
+ } else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == Variable::CONST) {
+ if (local->mode() == CONST ||
+ local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ if (local->mode() == CONST) {
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ } else { // LET
+ __ b(ne, done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
}
__ jmp(done);
}
@@ -1246,13 +1266,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+ if (var->mode() != LET && var->mode() != CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == Variable::LET) {
+ if (var->mode() == LET) {
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
@@ -1490,14 +1510,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ ldr(r6, MemOperand(sp)); // Copy of array literal.
+ __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ str(result_register(), FieldMemOperand(r1, offset));
+ Label no_map_change;
+ __ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ RecordWrite(r1, Operand(offset), r2, result_register());
+ __ RecordWriteField(
+ r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
+ __ push(r6); // Copy of array literal.
+ __ CallRuntime(Runtime::kNonSmiElementStored, 1);
+ __ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -1844,7 +1873,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
- } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(r0); // Value.
@@ -1869,11 +1898,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWrite(r1, Operand(offset), r2, r3);
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
- } else if (var->mode() != Variable::CONST) {
+ } else if (var->mode() != CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
@@ -1887,7 +1917,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
- __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
@@ -2107,10 +2139,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(r1);
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
- StrictModeFlag strict_mode = strict_mode_flag();
- if (FLAG_harmony_block_scoping) {
- strict_mode = kStrictMode;
- }
+ StrictModeFlag strict_mode =
+ FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
__ mov(r1, Operand(Smi::FromInt(strict_mode)));
__ push(r1);
@@ -2156,7 +2186,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
- if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@@ -2662,20 +2692,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
+ // Assume that there are only two callable types, and one of them is at
+ // either end of the type range for JS object types. Saves extra comparisons.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
-
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
- __ b(ge, &function);
-
- // Check if the constructor in the map is a function.
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ __ b(eq, &function);
+
+ __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ __ b(eq, &function);
+ // Assume that there is no larger type.
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+ // Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@@ -2853,7 +2887,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+ __ mov(r2, r0);
+ __ RecordWriteField(
+ r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(r0);
@@ -3141,16 +3177,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
- Label new_space;
- __ InNewSpace(elements, scratch1, eq, &new_space);
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ scratch1,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
+ ne,
+ &no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
- __ mov(scratch1, elements);
- __ RecordWriteHelper(elements, index1, scratch2);
- __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
+ __ RememberedSetHelper(elements,
+ index1,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
+ __ RememberedSetHelper(elements,
+ index2,
+ scratch2,
+ kDontSaveFPRegs,
+ MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3898,10 +3949,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Handle<String> check,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@@ -3942,9 +3997,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- Split(ge, if_true, if_false, fall_through);
-
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+ __ b(eq, if_true);
+ __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+ Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@@ -3963,18 +4020,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- VisitForAccumulatorValue(expr);
- PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
+ context()->Plug(if_true, if_false);
}
@@ -3982,9 +4028,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
+
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
-
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -3992,13 +4041,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
- context()->Plug(if_true, if_false);
- return;
- }
-
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@@ -4085,8 +4127,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
- Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@@ -4094,15 +4137,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- VisitForAccumulatorValue(expr->expression());
+ VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ Heap::RootListIndex nil_value = nil == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r1, nil_value);
__ cmp(r0, r1);
- if (expr->is_strict()) {
+ if (expr->op() == Token::EQ_STRICT) {
Split(eq, if_true, if_false, fall_through);
} else {
+ Heap::RootListIndex other_nil_value = nil == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
__ b(eq, if_true);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r1, other_nil_value);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
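
The EmitLiteralCompareNil hunk above encodes the JavaScript comparison rules for null and undefined: with strict equality only the named nil value matches, while with non-strict equality null, undefined and undetectable objects all compare equal. A plain-C++ sketch of those rules (illustrative only, not V8 code; Value and its flags are hypothetical stand-ins):

// Illustrative sketch of the semantics emitted by EmitLiteralCompareNil above.
#include <cassert>

enum NilValue { kNullValue, kUndefinedValue };
enum EqualityKind { kStrictEquality, kNonStrictEquality };

struct Value {
  bool is_null;
  bool is_undefined;
  bool is_undetectable;  // e.g. host objects that masquerade as undefined
};

bool CompareNil(const Value& v, NilValue nil, EqualityKind kind) {
  bool is_named_nil = (nil == kNullValue) ? v.is_null : v.is_undefined;
  if (kind == kStrictEquality) return is_named_nil;  // === matches exactly one value
  // == treats null, undefined and undetectable objects as interchangeable.
  return v.is_null || v.is_undefined || v.is_undetectable;
}

int main() {
  Value null_value = {true, false, false};
  Value undefined_value = {false, true, false};
  assert(CompareNil(null_value, kNullValue, kStrictEquality));
  assert(!CompareNil(undefined_value, kNullValue, kStrictEquality));
  assert(CompareNil(undefined_value, kNullValue, kNonStrictEquality));
  return 0;
}
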
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 2e49cae92..6e0badca1 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1);
+ __ RecordWrite(
+ elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
@@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
- __ EnterInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
+ // Push the receiver and the name of the function.
+ __ Push(r3, r2);
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ // Call the entry.
+ __ mov(r0, Operand(2));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
- CEntryStub stub(1);
- __ CallStub(&stub);
+ CEntryStub stub(1);
+ __ CallStub(&stub);
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- __ LeaveInternalFrame();
+ // Move result to r1 and leave the internal frame.
+ __ mov(r1, Operand(r0));
+ }
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@@ -650,12 +652,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- __ EnterInternalFrame();
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r2); // save the key
+ __ Push(r1, r2); // pass the receiver and the key
+ __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+ __ pop(r2); // restore the key
+ }
__ mov(r1, r0);
__ jmp(&do_call);
@@ -908,7 +911,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +920,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
- __ RecordWrite(r3, r6, r9);
+ __ mov(r9, r0);
+ __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@@ -1267,13 +1272,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, array, extra, check_if_double_array;
+ Label fast_object_with_map_check, fast_object_without_map_check;
+ Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
+ Register elements_map = r6;
+ Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1281,35 +1290,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
- __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+ __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
- __ cmp(r4, Operand(JS_PROXY_TYPE));
- __ b(eq, &slow);
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check that the object is in fast mode and writable.
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
- __ b(lo, &fast);
+ __ b(lo, &fast_object_with_map_check);
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1330,21 +1330,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ b(&fast);
+ __ b(&fast_object_without_map_check);
+
+ __ bind(&check_if_double_array);
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ // Add 1 to key, and go to common element store code for doubles.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(r4, key, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1352,18 +1362,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
- __ bind(&fast);
- // Fast case, store the value to the elements backing store.
- __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(r5));
- // Skip write barrier if the written value is a smi.
- __ tst(value, Operand(kSmiTagMask));
- __ Ret(eq);
+ __ bind(&fast_object_with_map_check);
+ Register scratch_value = r4;
+ Register address = r5;
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ b(ne, &fast_double_with_map_check);
+ __ bind(&fast_object_without_map_check);
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(value, &non_smi_value);
+ // It's irrelevant whether array is smi-only or not when writing a smi.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
+ __ Ret();
+
+ __ bind(&non_smi_value);
+ // Escape to slow case when writing non-smi into smi-only array.
+ __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Fast elements array, store the value to the elements backing store.
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, Operand(r4), r5, r6);
+ __ mov(scratch_value, value); // Preserve the value which is returned.
+ __ RecordWrite(elements,
+ address,
+ scratch_value,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ Ret();
+ __ bind(&fast_double_with_map_check);
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ __ cmp(elements_map,
+ Operand(masm->isolate()->factory()->fixed_double_array_map()));
+ __ b(ne, &slow);
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(value,
+ key,
+ receiver,
+ elements,
+ r4,
+ r5,
+ r6,
+ r7,
+ &slow);
__ Ret();
}
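
The rewritten KeyedStoreIC::GenerateGeneric above splits its fast path by the elements map: smis go into any fast backing store without a write barrier, heap objects need a FAST_ELEMENTS backing store plus a write barrier, and numbers destined for a FixedDoubleArray are stored unboxed. A rough C++ sketch of that dispatch order (illustrative only, not V8 code; the enums and helper are hypothetical):

// Illustrative dispatch for the fast-path cases above.
enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

enum StoreOutcome {
  STORE_WITHOUT_BARRIER,  // smi written into a fixed array
  STORE_WITH_BARRIER,     // heap object written into a fixed array
  STORE_AS_DOUBLE,        // number written into a fixed double array
  GO_TO_RUNTIME           // the "slow" label in the generated code
};

StoreOutcome ClassifyStore(ElementsKind kind, bool value_is_smi, bool value_is_number) {
  if (kind == FAST_DOUBLE_ELEMENTS) {
    // StoreNumberToDoubleElements handles smis and heap numbers and bails
    // out to the runtime for anything else.
    return value_is_number ? STORE_AS_DOUBLE : GO_TO_RUNTIME;
  }
  if (value_is_smi) return STORE_WITHOUT_BARRIER;
  // Writing a heap object into a smi-only array escapes to the runtime,
  // which can transition the elements kind.
  return kind == FAST_ELEMENTS ? STORE_WITH_BARRIER : GO_TO_RUNTIME;
}
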
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 30ccd05be..84959397b 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
- stream->Add(is_strict() ? " === null" : " == null");
+ stream->Add(kind() == kStrictEquality ? " === " : " == ");
+ stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@@ -711,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
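
The CreateEnvironment change above threads an accumulator through the recursion so argument slots are numbered consecutively across outer (inlined) environments instead of restarting at zero in each one. A simplified model of that numbering (plain C++, hypothetical types, not V8 code):

// Env is a hypothetical stand-in for HEnvironment and its chain of outers.
#include <vector>

struct Env {
  Env* outer;
  int pushed_argument_count;  // values that become LArgument operands
};

// Outer environments are processed first, mirroring the recursive call on
// hydrogen_env->outer() before the current environment's values.
void AssignArgumentIndices(Env* env, int* accumulator, std::vector<int>* indices) {
  if (env == NULL) return;
  AssignArgumentIndices(env->outer, accumulator, indices);
  for (int i = 0; i < env->pushed_argument_count; ++i) {
    indices->push_back((*accumulator)++);  // analogous to LArgument((*accumulator)++)
  }
}
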
@@ -1444,9 +1449,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+ return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1734,7 +1739,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
- return instr->check_hole_value()
+ return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@@ -1748,14 +1753,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- if (instr->check_hole_value()) {
- LOperand* temp = TempRegister();
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(new LStoreGlobalCell(value, temp));
- } else {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new LStoreGlobalCell(value, NULL);
- }
+ LOperand* temp = TempRegister();
+ LOperand* value = UseTempRegister(instr->value());
+ LInstruction* result = new LStoreGlobalCell(value, temp);
+ if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+ return result;
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 8c18760fd..73c7e459c 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsNullAndBranch) \
+ V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -627,16 +627,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
public:
- explicit LIsNullAndBranch(LOperand* value) {
+ explicit LIsNilAndBranch(LOperand* value) {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+ DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
- bool is_strict() const { return hydrogen()->is_strict(); }
+ EqualityKind kind() const { return hydrogen()->kind(); }
+ NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@@ -2159,7 +2160,8 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index f5d744914..70ef88481 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -82,6 +82,14 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
+
+ CodeStub::GenerateFPStubs();
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // NONE indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in GeneratePrologue).
+ FrameScope frame_scope(masm_, StackFrame::NONE);
+
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -206,13 +214,11 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- __ mov(r1, Operand(Context::SlotOffset(var->index())));
- __ str(r0, MemOperand(cp, r1));
- // Update the write barrier. This clobbers all involved
- // registers, so we have to use two more registers to avoid
- // clobbering cp.
- __ mov(r2, Operand(cp));
- __ RecordWrite(r2, Operand(r1), r3, r0);
+ MemOperand target = ContextOperand(cp, var->index());
+ __ str(r0, target);
+ // Update the write barrier. This clobbers r3 and r0.
+ __ RecordWriteContextSlot(
+ cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@@ -262,6 +268,9 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ Comment(";;; Deferred code @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@@ -739,7 +748,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) {
ASSERT(expected_safepoint_kind_ == kind);
- const ZoneList<LOperand*>* operands = pointers->operands();
+ const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@@ -1032,6 +1041,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@@ -1743,25 +1753,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Jump to the false block.
+ // If the expression is known to be untagged or a smi, then it's definitely
+ // not null, and it can't be an undetectable object.
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ EmitGoto(false_block);
+ return;
+ }
int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+ Heap::kNullValueRootIndex :
+ Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(ip, nil_value);
__ cmp(reg, ip);
- if (instr->is_strict()) {
+ if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, eq);
} else {
+ Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+ Heap::kUndefinedValueRootIndex :
+ Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, other_nil_value);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
@@ -1918,28 +1938,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- // Map is now in temp.
- // Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(ge, is_true);
+ // Assuming the following assertions, we can use the same compares to test
+ // for both being a function type and being in the object type range.
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ FIRST_SPEC_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_SPEC_OBJECT_TYPE - 1);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, is_false);
+ __ b(eq, is_true);
+ __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ b(eq, is_true);
} else {
- __ b(ge, is_false);
+ // Faster code path to avoid two compares: subtract lower bound from the
+ // actual type and do a signed compare with the width of the type range.
+ __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+ __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ b(gt, is_false);
}
+ // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
- // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
- // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -2016,9 +2044,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
-
+ virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
-
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@@ -2180,7 +2207,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->check_hole_value()) {
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@@ -2203,6 +2230,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
+ Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2211,8 +2239,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
- if (instr->hydrogen()->check_hole_value()) {
- Register scratch2 = ToRegister(instr->TempAt(0));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2222,6 +2249,15 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+ // Cells are always in the remembered set.
+ __ RecordWriteField(scratch,
+ JSGlobalPropertyCell::kValueOffset,
+ value,
+ scratch2,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ OMIT_REMEMBERED_SET);
}
@@ -2247,10 +2283,15 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ str(value, ContextOperand(context, instr->slot_index()));
+ MemOperand target = ContextOperand(context, instr->slot_index());
+ __ str(value, target);
if (instr->needs_write_barrier()) {
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWrite(context, Operand(offset), value, scratch0());
+ __ RecordWriteContextSlot(context,
+ target.offset(),
+ value,
+ scratch0(),
+ kLRHasBeenSaved,
+ kSaveFPRegs);
}
}
@@ -2500,13 +2541,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // TODO(danno): If no hole check is required, there is no need to allocate
- // elements into a temporary register, instead scratch can be used.
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@@ -2577,6 +2614,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -2906,6 +2944,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
+ virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@@ -3202,7 +3241,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+ CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3262,7 +3301,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ str(value, FieldMemOperand(object, offset));
if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties.
- __ RecordWrite(object, Operand(offset), value, scratch);
+ __ RecordWriteField(
+ object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -3270,7 +3310,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
- __ RecordWrite(scratch, Operand(offset), value, object);
+ __ RecordWriteField(
+ scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
}
}
}
@@ -3301,6 +3342,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
+ // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ // conversion, so it deopts in that case.
+ if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+ __ tst(value, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ }
+
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3315,8 +3363,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
- __ add(key, scratch, Operand(FixedArray::kHeaderSize));
- __ RecordWrite(elements, key, value);
+ __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
}
}
@@ -3417,6 +3465,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3452,6 +3501,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@@ -3575,6 +3625,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@@ -3646,6 +3697,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@@ -3711,6 +3763,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@@ -3819,16 +3872,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
-class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
- LTaggedToI* instr_;
-};
-
-
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@@ -3911,6 +3954,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+ class DeferredTaggedToI: public LDeferredCode {
+ public:
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToI* instr_;
+ };
+
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4343,10 +4396,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch,
- FIRST_CALLABLE_SPEC_OBJECT_TYPE);
- final_branch_condition = ge;
+ __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+ __ b(eq, true_label);
+ __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+ final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@@ -4468,6 +4523,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
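
The EmitClassOfTest hunk earlier in this file mentions a faster code path that subtracts the lower bound of the type range and does a single compare against the range width. The same trick in straight C++ (illustrative only; this shows the unsigned variant of the comparison):

// One-compare range check: equivalent to (type >= first && type <= last)
// for a valid range with first <= last.
bool InstanceTypeInRange(int type, int first, int last) {
  return static_cast<unsigned>(type - first) <= static_cast<unsigned>(last - first);
}
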
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index ead848903..711e4595e 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -376,16 +376,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen), external_exit_(NULL) {
+ : codegen_(codegen),
+ external_exit_(NULL),
+ instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
+ virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+ int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@@ -396,6 +400,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
+ int instruction_index_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index f37f31021..918f9ebe0 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true) {
+ allow_stub_calls_(true),
+ has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register address,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
- }
-
- // Calculate page address.
- Bfc(object, 0, kPageSizeBits);
-
- // Calculate region number.
- Ubfx(address, address, Page::kRegionSizeLog2,
- kPageSizeBits - Page::kRegionSizeLog2);
-
- // Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
- mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, address));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Will clobber 4 registers: object, offset, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Operand offset,
- Register scratch0,
- Register scratch1) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch0, eq, &done);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
- // Add offset into the object.
- add(scratch0, object, offset);
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
- // Record the actual write.
- RecordWriteHelper(object, scratch0, scratch1);
+ add(dst, object, Operand(offset - kHeapObjectTag));
+ if (emit_debug_code()) {
+ Label ok;
+ tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ b(eq, &ok);
+ stop("Unaligned cell in write barrier");
+ bind(&ok);
+ }
+
+ RecordWrite(object,
+ dst,
+ value,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ OMIT_SMI_CHECK);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+ mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@@ -484,29 +473,94 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register scratch) {
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+ ASSERT(!address.is(cp) && !value.is(cp));
Label done;
- // First, test that the object is not in the new space. We cannot set
- // region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ if (smi_check == INLINE_SMI_CHECK) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(value, Operand(kSmiTagMask));
+ b(eq, &done);
+ }
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ eq,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq,
+ &done);
// Record the actual write.
- RecordWriteHelper(object, address, scratch);
+ if (lr_status == kLRHasNotBeenSaved) {
+ push(lr);
+ }
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ pop(lr);
+ }
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(address, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+ mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+ }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register address,
+ Register scratch,
+ SaveFPRegsMode fp_mode,
+ RememberedSetFinalAction and_then) {
+ Label done;
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok);
+ stop("Remembered set pointer is in new space");
+ bind(&ok);
+ }
+ // Load store buffer top.
+ ExternalReference store_buffer =
+ ExternalReference::store_buffer_top(isolate());
+ mov(ip, Operand(store_buffer));
+ ldr(scratch, MemOperand(ip));
+ // Store pointer to buffer and increment buffer top.
+ str(address, MemOperand(scratch, kPointerSize, PostIndex));
+ // Write back new top of buffer.
+ str(scratch, MemOperand(ip));
+ // Call stub on end of buffer.
+ // Check for end of buffer.
+ tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kFallThroughAtEnd) {
+ b(eq, &done);
+ } else {
+ ASSERT(and_then == kReturnAtEnd);
+ Ret(eq);
+ }
+ push(lr);
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(fp_mode);
+ CallStub(&store_buffer_overflow);
+ pop(lr);
+ bind(&done);
+ if (and_then == kReturnAtEnd) {
+ Ret();
}
}
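
RememberedSetHelper above appends the slot address to the store buffer and only calls the overflow stub when the buffer fills up. A conceptual C++ sketch of that flow (not V8 code; StoreBuffer and the handler are hypothetical stand-ins):

// Conceptual model of the store-buffer append performed by RememberedSetHelper.
#include <cstdint>

struct StoreBuffer {
  uintptr_t* top;    // next free slot (store_buffer_top in the real code)
  uintptr_t* limit;  // position at which the overflow bit would be set
};

// Returns true if the caller can simply fall through (kFallThroughAtEnd);
// returns false after the overflow handler has run.
bool RememberSlot(StoreBuffer* buffer, uintptr_t slot_address,
                  void (*overflow_handler)(StoreBuffer*)) {
  *buffer->top++ = slot_address;       // str address, [scratch], #kPointerSize
  if (buffer->top != buffer->limit) {  // tst scratch, kStoreBufferOverflowBit
    return true;
  }
  overflow_handler(buffer);            // StoreBufferOverflowStub
  return false;
}
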
@@ -961,6 +1015,9 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -988,6 +1045,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1011,6 +1071,9 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@@ -1035,6 +1098,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -1090,10 +1156,10 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -1793,13 +1859,127 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(ls, fail);
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ b(hi, fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail) {
+ Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Register mantissa_reg = scratch2;
+ Register exponent_reg = scratch3;
+
+ // Handle smi values specially.
+ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ CheckMap(value_reg,
+ scratch1,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+ // in the exponent.
+ mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ cmp(exponent_reg, scratch1);
+ b(ge, &maybe_nan);
+
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ bind(&have_double_value);
+ add(scratch1, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(exponent_reg, FieldMemOperand(scratch1, offset));
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ b(gt, &is_nan);
+ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ cmp(mantissa_reg, Operand(0));
+ b(eq, &have_double_value);
+ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ jmp(&have_double_value);
+
+ bind(&smi_value);
+ add(scratch1, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ add(scratch1, scratch1,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ // scratch1 is now effective address of the double element
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+
+ Register untagged_value = receiver_reg;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(this,
+ untagged_value,
+ destination,
+ d0,
+ mantissa_reg,
+ exponent_reg,
+ scratch4,
+ s2);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ vstr(d0, scratch1, 0);
+ } else {
+ str(mantissa_reg, MemOperand(scratch1, 0));
+ str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -1895,13 +2075,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1913,13 +2093,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2022,6 +2201,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -2417,8 +2602,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1);
- stub.SaveDoubles();
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -2491,6 +2675,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@@ -2622,14 +2809,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
- CallRuntime(Runtime::kAbort, 2);
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@@ -2930,6 +3123,19 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ b(&entry);
+ bind(&loop);
+ str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ bind(&entry);
+ cmp(start_offset, end_offset);
+ b(lt, &loop);
+}
+
+
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@@ -3089,23 +3295,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- CallCFunctionHelper(no_reg,
- function,
- ip,
- num_reg_arguments,
- num_double_arguments);
+ mov(ip, Operand(function));
+ CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function,
- ExternalReference::the_hole_value_location(isolate()),
- scratch,
- num_reg_arguments,
- num_double_arguments);
+ int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@@ -3116,17 +3314,15 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
- Register scratch,
int num_arguments) {
- CallCFunction(function, scratch, num_arguments, 0);
+ CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments) {
+ ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@@ -3150,10 +3346,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
- if (function.is(no_reg)) {
- mov(scratch, Operand(function_reference));
- function = scratch;
- }
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -3185,6 +3377,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met) {
+ and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+ ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ tst(scratch, Operand(mask));
+ b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* has_color,
+ int first_bit,
+ int second_bit) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ Label other_color, word_boundary;
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(ip, Operand(mask_scratch));
+ b(first_bit == 1 ? eq : ne, &other_color);
+ // Shift left 1 by adding.
+ add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+ b(eq, &word_boundary);
+ tst(ip, Operand(mask_scratch));
+ b(second_bit == 1 ? ne : eq, has_color);
+ jmp(&other_color);
+
+ bind(&word_boundary);
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+ tst(ip, Operand(1));
+ b(second_bit == 1 ? ne : eq, has_color);
+ bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ Label is_data_object;
+ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ b(eq, &is_data_object);
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, not_data_object);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+ and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+ Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+ const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+ Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+ add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+ mov(ip, Operand(1));
+ mov(mask_reg, Operand(ip, LSL, mask_reg));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Register load_scratch,
+ Label* value_is_white_and_not_data) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ tst(mask_scratch, load_scratch);
+ b(ne, &done);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ // LSL may overflow, making the check conservative.
+ tst(load_scratch, Operand(mask_scratch, LSL, 1));
+ b(eq, &ok);
+ stop("Impossible marking bit pattern");
+ bind(&ok);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = load_scratch; // Holds map while checking type.
+ Register length = load_scratch; // Holds length of object after testing type.
+ Label is_data_object;
+
+ // Check for heap-number
+ ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+ b(eq, &is_data_object);
+
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = load_scratch;
+ ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+ b(ne, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ tst(instance_type, Operand(kExternalStringTag));
+ mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+ b(ne, &is_data_object);
+
+ // Sequential string, either ASCII or UC16.
+ // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+ // getting the length multiplied by 2.
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+ tst(instance_type, Operand(kStringEncodingMask));
+ mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
+ add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, length, Operand(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ orr(ip, ip, Operand(mask_scratch));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+ and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+ ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ add(ip, ip, Operand(length));
+ str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+ bind(&done);
+}
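The length fed into the live-byte counter is computed entirely inline. For sequential strings the computation above reduces to the following C sketch (assuming the asserted tag values; the helper name is illustrative):

    // smi_length is the raw smi-tagged length, i.e. length << 1.  For ASCII
    // strings we untag it (payload = length bytes); for two-byte strings we
    // keep the tag in place, which is exactly length * 2 bytes.  The total is
    // rounded up to the allocation alignment.
    static inline int SeqStringSizeSketch(int smi_length, bool is_ascii) {
      int payload = is_ascii ? (smi_length >> 1) : smi_length;
      return (SeqString::kHeaderSize + payload + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }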
+
+
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@@ -3234,6 +3605,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 6084fde2d..8ee468a91 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -29,6 +29,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
+#include "frames.h"
#include "v8globals.h"
namespace v8 {
@@ -79,6 +80,14 @@ enum ObjectToDoubleFlags {
};
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -157,40 +166,126 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ void IncrementalMarkingRecordWriteHelper(Register object,
+ Register value,
+ Register address);
+
+ enum RememberedSetFinalAction {
+ kReturnAtEnd,
+ kFallThroughAtEnd
+ };
+
+ // Record in the remembered set the fact that we have a pointer to new space
+ // at the address pointed to by the addr register. Only works if addr is not
+ // in new space.
+ void RememberedSetHelper(Register object, // Used for debug code.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then);
+
+ void CheckPageFlag(Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met);
+
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but scratch will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise
- Label* branch);
-
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
- // For the page containing |object| mark the region covering [address]
- // dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object,
- Register address,
- Register scratch);
+ // Check if an object has a given incremental marking color.
+ void HasColor(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* has_color,
+ int first_bit,
+ int second_bit);
- // For the page containing |object| mark the region covering
- // [object+offset] dirty. The object address must be in the first 8K
- // of an allocated page. The 'scratch' registers are used in the
- // implementation and all 3 registers are clobbered by the
- // operation, as well as the ip register. RecordWrite updates the
- // write barrier even when storing smis.
- void RecordWrite(Register object,
- Operand offset,
+ void JumpIfBlack(Register object,
Register scratch0,
- Register scratch1);
+ Register scratch1,
+ Label* on_black);
+
+ // Checks the color of an object. If the object is already grey or black
+ // then we just fall through, since it is already live. If it is white and
+ // we can determine that it doesn't need to be scanned, then we just mark it
+ // black and fall through. For the rest we jump to the label so the
+ // incremental marker can fix its assumptions.
+ void EnsureNotWhite(Register object,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* object_is_white_and_not_data);
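The color vocabulary used by HasColor, JumpIfBlack and EnsureNotWhite is a two-bit encoding per object (white "00", black "10", grey "11", where the first bit is the one selected by the mark-bit mask). A small sketch of the decision EnsureNotWhite implements, with hypothetical helper names:

    enum EnsureNotWhiteAction {
      kAlreadyLive,            // First bit set: black or grey, fall through.
      kMarkBlackAndContinue,   // White but data-only: flip one bit, fall through.
      kJumpToWhiteNotData      // White with pointers: let the marker handle it.
    };
    static inline EnsureNotWhiteAction Classify(bool first_mark_bit,
                                                bool is_data_only) {
      if (first_mark_bit) return kAlreadyLive;
      return is_data_only ? kMarkBlackAndContinue : kJumpToWhiteNotData;
    }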
- // For the page containing |object| mark the region covering
- // [address] dirty. The object address must be in the first 8K of an
- // allocated page. All 3 registers are clobbered by the operation,
- // as well as the ip register. RecordWrite updates the write barrier
- // even when storing smis.
- void RecordWrite(Register object,
- Register address,
- Register scratch);
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  The value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset
+  // from the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
+ void RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
+
+ // As above, but the offset has the tag presubtracted. For use with
+ // MemOperand(reg, off).
+ inline void RecordWriteContextSlot(
+ Register context,
+ int offset,
+ Register value,
+ Register scratch,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK) {
+ RecordWriteField(context,
+ offset + kHeapObjectTag,
+ value,
+ scratch,
+ lr_status,
+ save_fp,
+ remembered_set_action,
+ smi_check);
+ }
+
+ // For a given |object| notify the garbage collector that the slot |address|
+ // has been written. |value| is the object being stored. The value and
+ // address registers are clobbered by the operation.
+ void RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+ SmiCheck smi_check = INLINE_SMI_CHECK);
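A typical caller stores the value first and then reports the slot, as the stub-cache changes later in this patch do. A minimal sketch of the RecordWriteField pattern (the field offset here is a placeholder, not a real constant):

    // Store a tagged value into object + kSomeFieldOffset, then emit the
    // barrier.  Both value and scratch are clobbered by RecordWriteField.
    __ str(value, FieldMemOperand(object, kSomeFieldOffset));
    __ RecordWriteField(object,
                        kSomeFieldOffset,  // Same untagged offset as the store.
                        value,
                        scratch,
                        kLRHasNotBeenSaved,
                        kDontSaveFPRegs);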
// Push a handle.
void Push(Handle<Object> handle);
@@ -318,16 +413,6 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
-
- // ---------------------------------------------------------------------------
- // Activation frames
-
- void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
- void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
- void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
- void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -569,6 +654,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to, but not including, |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+ void InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler);
+
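The contract described above corresponds to a simple filler loop; a rough C equivalent, with illustrative pointer types:

    // Store |filler| into every pointer-sized slot in [start, end); on exit
    // the running pointer has advanced to end, mirroring how |start_offset|
    // ends up equal to |end_offset|.
    static inline void InitializeFieldsWithFillerSketch(Object** start,
                                                        Object** end,
                                                        Object* filler) {
      while (start < end) *start++ = filler;
    }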
// ---------------------------------------------------------------------------
// Support functions.
@@ -608,6 +700,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+ // Check if a map for a JSObject indicates that the object can have both smi
+ // and HeapObject elements. Jump to the specified label if it does not.
+ void CheckFastObjectElements(Register map,
+ Register scratch,
+ Label* fail);
+
+ // Check if a map for a JSObject indicates that the object has fast smi only
+ // elements. Jump to the specified label if it does not.
+ void CheckFastSmiOnlyElements(Register map,
+ Register scratch,
+ Label* fail);
+
+  // Check to see if the value in value_reg can be stored as a double in
+  // FastDoubleElements.  If it can, store it at the index specified by
+  // key_reg in the FastDoubleElements array elements_reg, otherwise jump
+  // to fail.
+ void StoreNumberToDoubleElements(Register value_reg,
+ Register key_reg,
+ Register receiver_reg,
+ Register elements_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -830,11 +947,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, Register scratch, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
- void CallCFunction(Register function, Register scratch,
+ void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
@@ -902,6 +1019,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
+ void set_has_frame(bool value) { has_frame_ = value; }
+ bool has_frame() { return has_frame_; }
+ inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@@ -1048,10 +1168,12 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+
private:
void CallCFunctionHelper(Register function,
- ExternalReference function_reference,
- Register scratch,
int num_reg_arguments,
int num_double_arguments);
@@ -1067,16 +1189,25 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits, and the mask
+  // register holds a mask with a single bit set at the address's mark-bit
+  // position.  Leaves addr_reg unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@@ -1084,6 +1215,7 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
+ bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index cd76edbf1..c87646793 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ __ CallCFunction(function, argument_count);
+ }
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -611,6 +614,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated to set it up here.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 6af535553..570420262 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1618,6 +1618,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ // Catch null pointers a little earlier.
+ ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index f8565924b..4558afe68 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -431,7 +431,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
- __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +450,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+ __ mov(name_reg, r0);
+ __ RecordWriteField(scratch,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
// Return the value (register r0).
@@ -553,9 +565,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
+static MaybeObject* GenerateFastApiDirectCall(
+ MacroAssembler* masm,
+ const CallOptimization& optimization,
+ int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@@ -591,6 +604,8 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
ApiFunction fun(api_function_address);
const int kApiStackSpace = 4;
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@@ -616,9 +631,11 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
+ AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
+
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -794,7 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
- __ EnterInternalFrame();
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
@@ -811,7 +828,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
- __ LeaveInternalFrame();
+
+ // Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
@@ -820,18 +838,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
- __ EnterInternalFrame();
- __ Push(holder, name_);
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- __ LeaveInternalFrame();
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(holder, name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
+ }
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1228,7 +1247,10 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ApiFunction fun(getter_address);
const int kApiStackSpace = 1;
+
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
+
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@@ -1288,41 +1310,43 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
- __ EnterInternalFrame();
+ {
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- // CALLBACKS case needs a receiver to be passed into C++ callback.
- __ Push(receiver, holder_reg, name_reg);
- } else {
- __ Push(holder_reg, name_reg);
- }
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
- holder_reg,
- name_reg,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
+ // Invoke an interceptor. Note: map checks from receiver to
+ // interceptor's holder has been compiled before (see a caller
+ // of this method.)
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ frame_scope.GenerateLeaveFrame();
+ __ Ret();
- __ bind(&interceptor_failed);
- __ pop(name_reg);
- __ pop(holder_reg);
- if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
- __ pop(receiver);
- }
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
- __ LeaveInternalFrame();
+ // Leave the internal frame.
+ }
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
@@ -1556,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label exit, with_write_barrier, attempt_to_grow_elements;
+ Label attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1571,11 +1595,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
+ // Check if value is a smi.
+ Label with_write_barrier;
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ JumpIfNotSmi(r4, &with_write_barrier);
+
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@@ -1585,14 +1613,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
- __ JumpIfNotSmi(r4, &with_write_barrier);
- __ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
- __ InNewSpace(elements, r4, eq, &exit);
- __ RecordWriteHelper(elements, end_elements, r4);
+
+ __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Push the element.
+ // We may need a register containing the address end_elements below,
+ // so write back the value in end_elements.
+ __ add(end_elements, elements,
+ Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+ __ RecordWrite(elements,
+ end_elements,
+ r4,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
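Taken together, the rewritten push fast path treats smis and heap objects differently, since only the latter need a write barrier and may force an elements-kind transition. A compressed, self-contained sketch of that decision (names are illustrative):

    enum PushOutcome { kStoredSmi, kStoredWithBarrier, kDelegatedToBuiltin };
    static inline PushOutcome PushFastPathSketch(bool value_is_smi,
                                                 bool elements_are_smi_only) {
      if (value_is_smi) return kStoredSmi;                    // No barrier needed.
      if (elements_are_smi_only) return kDelegatedToBuiltin;  // Needs transition.
      return kStoredWithBarrier;                              // RecordWrite emitted.
    }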
@@ -1604,6 +1649,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
+ __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+ // Growing elements that are SMI-only requires special handling in case
+ // the new element is non-Smi. For now, delegate to the builtin.
+ Label no_fast_elements_check;
+ __ JumpIfSmi(r2, &no_fast_elements_check);
+ __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ bind(&no_fast_elements_check);
+
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,8 +1684,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
- __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
- __ str(r6, MemOperand(end_elements));
+ __ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@@ -2713,6 +2766,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+ __ mov(r1, r0);
+ __ RecordWriteField(r4,
+ JSGlobalPropertyCell::kValueOffset,
+ r1,
+ r2,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@@ -3116,7 +3178,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@@ -3212,9 +3274,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
MapList* receiver_maps,
- CodeList* handler_ics) {
+ CodeList* handler_stubs,
+ MapList* transitioned_maps) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3227,12 +3290,20 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
int receiver_count = receiver_maps->length();
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map(receiver_maps->at(current));
- Handle<Code> code(handler_ics->at(current));
+ for (int i = 0; i < receiver_count; ++i) {
+ Handle<Map> map(receiver_maps->at(i));
+ Handle<Code> code(handler_stubs->at(i));
__ mov(ip, Operand(map));
__ cmp(r3, ip);
- __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ if (transitioned_maps->at(i) == NULL) {
+ __ Jump(code, RelocInfo::CODE_TARGET, eq);
+ } else {
+ Label next_map;
+ __ b(eq, &next_map);
+ __ mov(r4, Operand(Handle<Map>(transitioned_maps->at(i))));
+ __ Jump(code, RelocInfo::CODE_TARGET, al);
+ __ bind(&next_map);
+ }
}
__ bind(&miss);
@@ -3454,6 +3525,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3612,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3880,6 +3953,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +4017,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +4157,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
+ case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4234,8 +4310,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
- bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+ MacroAssembler* masm,
+ bool is_js_array,
+ ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -4244,7 +4322,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
- Label miss_force_generic;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
@@ -4277,15 +4355,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
- __ add(scratch,
- elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ str(value_reg,
- MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ RecordWrite(scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
- receiver_reg , elements_reg);
-
+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+ __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ } else {
+ ASSERT(elements_kind == FAST_ELEMENTS);
+ __ add(scratch,
+ elements_reg,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ add(scratch,
+ scratch,
+ Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ str(value_reg, MemOperand(scratch));
+ __ mov(receiver_reg, value_reg);
+ __ RecordWrite(elements_reg, // Object.
+ scratch, // Address.
+ receiver_reg, // Value.
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
// value_reg (r0) is preserved.
// Done.
__ Ret();
@@ -4294,6 +4390,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
@@ -4309,15 +4409,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
- Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+ Label miss_force_generic, transition_elements_kind;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
- Register scratch = r3;
- Register elements_reg = r4;
- Register mantissa_reg = r5;
- Register exponent_reg = r6;
+ Register elements_reg = r3;
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ Register scratch3 = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4329,90 +4429,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
- __ ldr(scratch,
+ __ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
- __ cmp(key_reg, scratch);
+ __ cmp(key_reg, scratch1);
__ b(hs, &miss_force_generic);
- // Handle smi values specially.
- __ JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- __ CheckMap(value_reg,
- scratch,
- masm->isolate()->factory()->heap_number_map(),
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
- __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- __ cmp(exponent_reg, scratch);
- __ b(ge, &maybe_nan);
-
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- __ bind(&have_double_value);
- __ add(scratch, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ str(exponent_reg, FieldMemOperand(scratch, offset));
- __ Ret();
-
- __ bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- __ b(gt, &is_nan);
- __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- __ cmp(mantissa_reg, Operand(0));
- __ b(eq, &have_double_value);
- __ bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- __ jmp(&have_double_value);
-
- __ bind(&smi_value);
- __ add(scratch, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch, scratch,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP3)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = receiver_reg;
- __ SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(
- masm,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP3);
- __ vstr(d0, scratch, 0);
- } else {
- __ str(mantissa_reg, MemOperand(scratch, 0));
- __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
- }
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ receiver_reg,
+ elements_reg,
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4420,6 +4455,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&transition_elements_kind);
+ Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(ic_miss, RelocInfo::CODE_TARGET);
}