author     Ryan Dahl <ry@tinyclouds.org>    2011-11-03 10:34:22 -0700
committer  Ryan Dahl <ry@tinyclouds.org>    2011-11-03 10:34:22 -0700
commit     0e9c1ca67399868e8d602e146dc51d84ad9fdc15 (patch)
tree       6171b2ab5bcad0697b60a75c30ac6c4d0674dfec /deps/v8/src/x64
parent     a6dbe0ff23a8d73cd747de30c426753ae743113a (diff)
download   node-0e9c1ca67399868e8d602e146dc51d84ad9fdc15.tar.gz
Downgrade V8 to 3.6.4
Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h           |   26
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc              |    4
-rw-r--r--  deps/v8/src/x64/assembler-x64.h               |   15
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc               | 1077
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc             |  499
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h              |  277
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                |    8
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                  |  105
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc            |   32
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           |  219
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                     |  130
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc        |  196
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h         |   10
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                |   34
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                 |   28
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc        |  620
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h         |  252
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc |   22
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc             |  299
19 files changed, 1175 insertions(+), 2678 deletions(-)
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 10f0b886d..8db54f075 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -242,11 +242,6 @@ void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
     Assembler::set_target_address_at(pc_, target);
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    if (host() != NULL) {
-      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-          host(), this, HeapObject::cast(target_code));
-    }
   } else {
     Memory::Address_at(pc_) = target;
     CPU::FlushICache(pc_, sizeof(Address));
@@ -284,12 +279,8 @@ Address* RelocInfo::target_reference_address() {
 
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Memory::Object_at(pc_) = target;
+  *reinterpret_cast<Object**>(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (host() != NULL && target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-  }
 }
 
 
@@ -315,12 +306,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
-  }
 }
 
 
@@ -359,11 +344,6 @@ void RelocInfo::set_call_address(Address target) {
       target;
   CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                    sizeof(Address));
-  if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -388,7 +368,7 @@ Object** RelocInfo::call_object_address() {
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(this);
+    visitor->VisitPointer(target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
@@ -416,7 +396,7 @@ template<typename StaticVisitor>
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, this);
+    StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 1c4980ebc..745fdaeb8 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -47,7 +47,7 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
 void CpuFeatures::Probe() {
-  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
+  ASSERT(!initialized_);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -2983,7 +2983,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 0d870537f..2e373faac 100644
---
a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -215,12 +215,6 @@ struct XMMRegister { return names[index]; } - static XMMRegister from_code(int code) { - ASSERT(code >= 0); - ASSERT(code < kNumRegisters); - XMMRegister r = { code }; - return r; - } bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } bool is(XMMRegister reg) const { return code_ == reg.code_; } int code() const { @@ -741,10 +735,6 @@ class Assembler : public AssemblerBase { immediate_arithmetic_op_32(0x0, dst, src); } - void addl(const Operand& dst, Register src) { - arithmetic_op_32(0x01, src, dst); - } - void addq(Register dst, Register src) { arithmetic_op(0x03, dst, src); } @@ -1404,14 +1394,13 @@ class Assembler : public AssemblerBase { static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; - byte byte_at(int pos) { return buffer_[pos]; } - void set_byte_at(int pos, byte value) { buffer_[pos] = value; } - protected: bool emit_debug_code() const { return emit_debug_code_; } private: byte* addr_at(int pos) { return buffer_ + pos; } + byte byte_at(int pos) { return buffer_[pos]; } + void set_byte_at(int pos, byte value) { buffer_[pos] = value; } uint32_t long_at(int pos) { return *reinterpret_cast<uint32_t*>(addr_at(pos)); } diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 79ddb1393..db06909da 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -79,12 +79,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- rdi: constructor function // ----------------------------------- - Label slow, non_function_call; + Label non_function_call; // Check that function is not a smi. __ JumpIfSmi(rdi, &non_function_call); // Check that function is a JSFunction. __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &slow); + __ j(not_equal, &non_function_call); // Jump to the function-specific construct stub. __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); @@ -94,19 +94,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // rdi: called object // rax: number of arguments - // rcx: object map - Label do_call; - __ bind(&slow); - __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); - __ j(not_equal, &non_function_call); - __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); - __ jmp(&do_call); - __ bind(&non_function_call); - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); - __ bind(&do_call); // Set expected number of arguments to zero (not changing rax). __ Set(rbx, 0); + __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(rcx, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -119,278 +110,272 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Should never count constructions for api objects. ASSERT(!is_api_function || !count_constructions); - // Enter a construct frame. - { - FrameScope scope(masm, StackFrame::CONSTRUCT); + // Enter a construct frame. + __ EnterConstructFrame(); - // Store a smi-tagged arguments count on the stack. - __ Integer32ToSmi(rax, rax); - __ push(rax); + // Store a smi-tagged arguments count on the stack. + __ Integer32ToSmi(rax, rax); + __ push(rax); - // Push the function to invoke on the stack. - __ push(rdi); + // Push the function to invoke on the stack. + __ push(rdi); - // Try to allocate the object without transitioning into C code. 
If any of - // the preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + // Try to allocate the object without transitioning into C code. If any of the + // preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(masm->isolate()); - __ movq(kScratchRegister, debug_step_in_fp); - __ cmpq(Operand(kScratchRegister, 0), Immediate(0)); - __ j(not_equal, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ movq(kScratchRegister, debug_step_in_fp); + __ cmpq(Operand(kScratchRegister, 0), Immediate(0)); + __ j(not_equal, &rt_call); #endif - // Verified that the constructor is a JSFunction. - // Load the initial map and verify that it is in fact a map. - // rdi: constructor - __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); - // Will both indicate a NULL and a Smi - ASSERT(kSmiTag == 0); - __ JumpIfSmi(rax, &rt_call); - // rdi: constructor - // rax: initial map (if proven valid below) - __ CmpObjectType(rax, MAP_TYPE, rbx); - __ j(not_equal, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see - // comments in Runtime_NewObject in runtime.cc). In which case the - // initial map's instance type would be JS_FUNCTION_TYPE. - // rdi: constructor - // rax: initial map - __ CmpInstanceType(rax, JS_FUNCTION_TYPE); - __ j(equal, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ decb(FieldOperand(rcx, - SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + // Verified that the constructor is a JSFunction. + // Load the initial map and verify that it is in fact a map. + // rdi: constructor + __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi + STATIC_ASSERT(kSmiTag == 0); + __ JumpIfSmi(rax, &rt_call); + // rdi: constructor + // rax: initial map (if proven valid below) + __ CmpObjectType(rax, MAP_TYPE, rbx); + __ j(not_equal, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see comments + // in Runtime_NewObject in runtime.cc). In which case the initial map's + // instance type would be JS_FUNCTION_TYPE. + // rdi: constructor + // rax: initial map + __ CmpInstanceType(rax, JS_FUNCTION_TYPE); + __ j(equal, &rt_call); + + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset)); + __ j(not_zero, &allocate); - __ push(rax); - __ push(rdi); + __ push(rax); + __ push(rdi); - __ push(rdi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + __ push(rdi); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - __ pop(rdi); - __ pop(rax); + __ pop(rdi); + __ pop(rax); - __ bind(&allocate); - } + __ bind(&allocate); + } - // Now allocate the JSObject on the heap. 
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); - __ shl(rdi, Immediate(kPointerSizeLog2)); - // rdi: size of new object - __ AllocateInNewSpace(rdi, - rbx, - rdi, - no_reg, - &rt_call, - NO_ALLOCATION_FLAGS); - // Allocated the JSObject, now initialize the fields. - // rax: initial map - // rbx: JSObject (not HeapObject tagged - the actual address). - // rdi: start of next object - __ movq(Operand(rbx, JSObject::kMapOffset), rax); - __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex); - __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); - __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); - // Set extra fields in the newly allocated object. - // rax: initial map - // rbx: JSObject - // rdi: start of next object - __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); - __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); + // Now allocate the JSObject on the heap. + __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); + __ shl(rdi, Immediate(kPointerSizeLog2)); + // rdi: size of new object + __ AllocateInNewSpace(rdi, + rbx, + rdi, + no_reg, + &rt_call, + NO_ALLOCATION_FLAGS); + // Allocated the JSObject, now initialize the fields. + // rax: initial map + // rbx: JSObject (not HeapObject tagged - the actual address). + // rdi: start of next object + __ movq(Operand(rbx, JSObject::kMapOffset), rax); + __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex); + __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); + __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); + // Set extra fields in the newly allocated object. + // rax: initial map + // rbx: JSObject + // rdi: start of next object + { Label loop, entry; + // To allow for truncation. if (count_constructions) { - __ movzxbq(rsi, - FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); - __ lea(rsi, - Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize)); - // rsi: offset of first field after pre-allocated fields - if (FLAG_debug_code) { - __ cmpq(rsi, rdi); - __ Assert(less_equal, - "Unexpected number of pre-allocated property fields."); - } - __ InitializeFieldsWithFiller(rcx, rsi, rdx); __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex); - } - __ InitializeFieldsWithFiller(rcx, rdi, rdx); - - // Add the object tag to make the JSObject real, so that we can continue - // and jump into the continuation code at any time from now on. Any - // failures need to undo the allocation, so that the heap is in a - // consistent state and verifiable. - // rax: initial map - // rbx: JSObject - // rdi: start of next object - __ or_(rbx, Immediate(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. - // Allocate and initialize a FixedArray if it is. - // rax: initial map - // rbx: JSObject - // rdi: start of next object - // Calculate total properties described map. - __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); - __ movzxbq(rcx, - FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); - __ addq(rdx, rcx); - // Calculate unused properties past the end of the in-object properties. - __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); - __ subq(rdx, rcx); - // Done if no extra properties are to be allocated. - __ j(zero, &allocated); - __ Assert(positive, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. 
- // rbx: JSObject - // rdi: start of next object (will be start of FixedArray) - // rdx: number of elements in properties array - __ AllocateInNewSpace(FixedArray::kHeaderSize, - times_pointer_size, - rdx, - rdi, - rax, - no_reg, - &undo_allocation, - RESULT_CONTAINS_TOP); - - // Initialize the FixedArray. - // rbx: JSObject - // rdi: FixedArray - // rdx: number of elements - // rax: start of next object - __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex); - __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map - __ Integer32ToSmi(rdx, rdx); - __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length - - // Initialize the fields to undefined. - // rbx: JSObject - // rdi: FixedArray - // rax: start of next object - // rdx: number of elements - { Label loop, entry; + } else { __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); - __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ movq(Operand(rcx, 0), rdx); - __ addq(rcx, Immediate(kPointerSize)); - __ bind(&entry); - __ cmpq(rcx, rax); - __ j(below, &loop); } + __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ movq(Operand(rcx, 0), rdx); + __ addq(rcx, Immediate(kPointerSize)); + __ bind(&entry); + __ cmpq(rcx, rdi); + __ j(less, &loop); + } - // Store the initialized FixedArray into the properties field of - // the JSObject - // rbx: JSObject - // rdi: FixedArray - __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag - __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); + // Add the object tag to make the JSObject real, so that we can continue and + // jump into the continuation code at any time from now on. Any failures + // need to undo the allocation, so that the heap is in a consistent state + // and verifiable. + // rax: initial map + // rbx: JSObject + // rdi: start of next object + __ or_(rbx, Immediate(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. + // Allocate and initialize a FixedArray if it is. + // rax: initial map + // rbx: JSObject + // rdi: start of next object + // Calculate total properties described map. + __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); + __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); + __ addq(rdx, rcx); + // Calculate unused properties past the end of the in-object properties. + __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); + __ subq(rdx, rcx); + // Done if no extra properties are to be allocated. + __ j(zero, &allocated); + __ Assert(positive, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // rbx: JSObject + // rdi: start of next object (will be start of FixedArray) + // rdx: number of elements in properties array + __ AllocateInNewSpace(FixedArray::kHeaderSize, + times_pointer_size, + rdx, + rdi, + rax, + no_reg, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. + // rbx: JSObject + // rdi: FixedArray + // rdx: number of elements + // rax: start of next object + __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex); + __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map + __ Integer32ToSmi(rdx, rdx); + __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length + + // Initialize the fields to undefined. 
+ // rbx: JSObject + // rdi: FixedArray + // rax: start of next object + // rdx: number of elements + { Label loop, entry; + __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); + __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ movq(Operand(rcx, 0), rdx); + __ addq(rcx, Immediate(kPointerSize)); + __ bind(&entry); + __ cmpq(rcx, rax); + __ j(below, &loop); + } + // Store the initialized FixedArray into the properties field of + // the JSObject + // rbx: JSObject + // rdi: FixedArray + __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag + __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); - // Continue with JSObject being successfully allocated - // rbx: JSObject - __ jmp(&allocated); - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // rbx: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(rbx); - } + // Continue with JSObject being successfully allocated + // rbx: JSObject + __ jmp(&allocated); - // Allocate the new receiver object using the runtime call. - // rdi: function (constructor) - __ bind(&rt_call); - // Must restore rdi (constructor) before calling runtime. - __ movq(rdi, Operand(rsp, 0)); - __ push(rdi); - __ CallRuntime(Runtime::kNewObject, 1); - __ movq(rbx, rax); // store result in rbx + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // rbx: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(rbx); + } - // New object allocated. - // rbx: newly allocated object - __ bind(&allocated); - // Retrieve the function from the stack. - __ pop(rdi); + // Allocate the new receiver object using the runtime call. + // rdi: function (constructor) + __ bind(&rt_call); + // Must restore rdi (constructor) before calling runtime. + __ movq(rdi, Operand(rsp, 0)); + __ push(rdi); + __ CallRuntime(Runtime::kNewObject, 1); + __ movq(rbx, rax); // store result in rbx - // Retrieve smi-tagged arguments count from the stack. - __ movq(rax, Operand(rsp, 0)); - __ SmiToInteger32(rax, rax); + // New object allocated. + // rbx: newly allocated object + __ bind(&allocated); + // Retrieve the function from the stack. + __ pop(rdi); - // Push the allocated receiver to the stack. We need two copies - // because we may have to return the original one and the calling - // conventions dictate that the called function pops the receiver. - __ push(rbx); - __ push(rbx); + // Retrieve smi-tagged arguments count from the stack. + __ movq(rax, Operand(rsp, 0)); + __ SmiToInteger32(rax, rax); - // Setup pointer to last argument. - __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); + // Push the allocated receiver to the stack. We need two copies + // because we may have to return the original one and the calling + // conventions dictate that the called function pops the receiver. + __ push(rbx); + __ push(rbx); - // Copy arguments and receiver to the expression stack. - Label loop, entry; - __ movq(rcx, rax); - __ jmp(&entry); - __ bind(&loop); - __ push(Operand(rbx, rcx, times_pointer_size, 0)); - __ bind(&entry); - __ decq(rcx); - __ j(greater_equal, &loop); - - // Call the function. 
- if (is_api_function) { - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, - CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); - } else { - ParameterCount actual(rax); - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Setup pointer to last argument. + __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); - // Restore context from the frame. - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + // Copy arguments and receiver to the expression stack. + Label loop, entry; + __ movq(rcx, rax); + __ jmp(&entry); + __ bind(&loop); + __ push(Operand(rbx, rcx, times_pointer_size, 0)); + __ bind(&entry); + __ decq(rcx); + __ j(greater_equal, &loop); - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - // If the result is a smi, it is *not* an object in the ECMA sense. - __ JumpIfSmi(rax, &use_receiver); + // Call the function. + if (is_api_function) { + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + } else { + ParameterCount actual(rax); + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, &exit); + // Restore context from the frame. + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ movq(rax, Operand(rsp, 0)); + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + // If the result is a smi, it is *not* an object in the ECMA sense. + __ JumpIfSmi(rax, &use_receiver); - // Restore the arguments count and leave the construct frame. - __ bind(&exit); - __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count. + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(above_equal, &exit); - // Leave construct frame. - } + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ movq(rax, Operand(rsp, 0)); + + // Restore the arguments count and leave the construct frame. + __ bind(&exit); + __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count + __ LeaveConstructFrame(); // Remove caller arguments from the stack and return. __ pop(rcx); @@ -428,108 +413,104 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // - Object*** argv // (see Handle::Invoke in execution.cc). 
- // Open a C++ scope for the FrameScope. - { - // Platform specific argument handling. After this, the stack contains - // an internal frame and the pushed function and receiver, and - // register rax and rbx holds the argument count and argument array, - // while rdi holds the function pointer and rsi the context. - + // Platform specific argument handling. After this, the stack contains + // an internal frame and the pushed function and receiver, and + // register rax and rbx holds the argument count and argument array, + // while rdi holds the function pointer and rsi the context. #ifdef _WIN64 - // MSVC parameters in: - // rcx : entry (ignored) - // rdx : function - // r8 : receiver - // r9 : argc - // [rsp+0x20] : argv - - // Clear the context before we push it when entering the internal frame. - __ Set(rsi, 0); - // Enter an internal frame. - FrameScope scope(masm, StackFrame::INTERNAL); - - // Load the function context into rsi. - __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset)); - - // Push the function and the receiver onto the stack. - __ push(rdx); - __ push(r8); - - // Load the number of arguments and setup pointer to the arguments. - __ movq(rax, r9); - // Load the previous frame pointer to access C argument on stack - __ movq(kScratchRegister, Operand(rbp, 0)); - __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); - // Load the function pointer into rdi. - __ movq(rdi, rdx); + // MSVC parameters in: + // rcx : entry (ignored) + // rdx : function + // r8 : receiver + // r9 : argc + // [rsp+0x20] : argv + + // Clear the context before we push it when entering the JS frame. + __ Set(rsi, 0); + __ EnterInternalFrame(); + + // Load the function context into rsi. + __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset)); + + // Push the function and the receiver onto the stack. + __ push(rdx); + __ push(r8); + + // Load the number of arguments and setup pointer to the arguments. + __ movq(rax, r9); + // Load the previous frame pointer to access C argument on stack + __ movq(kScratchRegister, Operand(rbp, 0)); + __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); + // Load the function pointer into rdi. + __ movq(rdi, rdx); #else // _WIN64 - // GCC parameters in: - // rdi : entry (ignored) - // rsi : function - // rdx : receiver - // rcx : argc - // r8 : argv - - __ movq(rdi, rsi); - // rdi : function - - // Clear the context before we push it when entering the internal frame. - __ Set(rsi, 0); - // Enter an internal frame. - FrameScope scope(masm, StackFrame::INTERNAL); - - // Push the function and receiver and setup the context. - __ push(rdi); - __ push(rdx); - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + // GCC parameters in: + // rdi : entry (ignored) + // rsi : function + // rdx : receiver + // rcx : argc + // r8 : argv + + __ movq(rdi, rsi); + // rdi : function + + // Clear the context before we push it when entering the JS frame. + __ Set(rsi, 0); + // Enter an internal frame. + __ EnterInternalFrame(); + + // Push the function and receiver and setup the context. + __ push(rdi); + __ push(rdx); + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - // Load the number of arguments and setup pointer to the arguments. - __ movq(rax, rcx); - __ movq(rbx, r8); + // Load the number of arguments and setup pointer to the arguments. + __ movq(rax, rcx); + __ movq(rbx, r8); #endif // _WIN64 - // Current stack contents: - // [rsp + 2 * kPointerSize ... 
]: Internal frame - // [rsp + kPointerSize] : function - // [rsp] : receiver - // Current register contents: - // rax : argc - // rbx : argv - // rsi : context - // rdi : function - - // Copy arguments to the stack in a loop. - // Register rbx points to array of pointers to handle locations. - // Push the values of these handles. - Label loop, entry; - __ Set(rcx, 0); // Set loop variable to 0. - __ jmp(&entry); - __ bind(&loop); - __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0)); - __ push(Operand(kScratchRegister, 0)); // dereference handle - __ addq(rcx, Immediate(1)); - __ bind(&entry); - __ cmpq(rcx, rax); - __ j(not_equal, &loop); - - // Invoke the code. - if (is_construct) { - // Expects rdi to hold function pointer. - __ Call(masm->isolate()->builtins()->JSConstructCall(), - RelocInfo::CODE_TARGET); - } else { - ParameterCount actual(rax); - // Function must be in rdi. - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } - // Exit the internal frame. Notice that this also removes the empty - // context and the function left on the stack by the code - // invocation. + // Current stack contents: + // [rsp + 2 * kPointerSize ... ]: Internal frame + // [rsp + kPointerSize] : function + // [rsp] : receiver + // Current register contents: + // rax : argc + // rbx : argv + // rsi : context + // rdi : function + + // Copy arguments to the stack in a loop. + // Register rbx points to array of pointers to handle locations. + // Push the values of these handles. + Label loop, entry; + __ Set(rcx, 0); // Set loop variable to 0. + __ jmp(&entry); + __ bind(&loop); + __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0)); + __ push(Operand(kScratchRegister, 0)); // dereference handle + __ addq(rcx, Immediate(1)); + __ bind(&entry); + __ cmpq(rcx, rax); + __ j(not_equal, &loop); + + // Invoke the code. + if (is_construct) { + // Expects rdi to hold function pointer. + __ Call(masm->isolate()->builtins()->JSConstructCall(), + RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(rax); + // Function must be in rdi. + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); } + // Exit the JS frame. Notice that this also removes the empty + // context and the function left on the stack by the code + // invocation. + __ LeaveInternalFrame(); // TODO(X64): Is argument correct? Is there a receiver to remove? - __ ret(1 * kPointerSize); // Remove receiver. + __ ret(1 * kPointerSize); // remove receiver } @@ -545,24 +526,23 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Push a copy of the function onto the stack. - __ push(rdi); - // Push call kind information. - __ push(rcx); + // Push a copy of the function onto the stack. + __ push(rdi); + // Push call kind information. + __ push(rcx); - __ push(rdi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyCompile, 1); + __ push(rdi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyCompile, 1); - // Restore call kind information. - __ pop(rcx); - // Restore receiver. - __ pop(rdi); + // Restore call kind information. + __ pop(rcx); + // Restore receiver. + __ pop(rdi); - // Tear down internal frame. - } + // Tear down temporary frame. 
+ __ LeaveInternalFrame(); // Do a tail-call of the compiled function. __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); @@ -572,24 +552,23 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Push a copy of the function onto the stack. - __ push(rdi); - // Push call kind information. - __ push(rcx); + // Push a copy of the function onto the stack. + __ push(rdi); + // Push call kind information. + __ push(rcx); - __ push(rdi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyRecompile, 1); + __ push(rdi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyRecompile, 1); - // Restore call kind information. - __ pop(rcx); - // Restore function. - __ pop(rdi); + // Restore call kind information. + __ pop(rcx); + // Restore function. + __ pop(rdi); - // Tear down internal frame. - } + // Tear down temporary frame. + __ LeaveInternalFrame(); // Do a tail-call of the compiled function. __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); @@ -600,15 +579,14 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Pass the deoptimization type to the runtime system. - __ Push(Smi::FromInt(static_cast<int>(type))); + // Pass the deoptimization type to the runtime system. + __ Push(Smi::FromInt(static_cast<int>(type))); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - // Tear down internal frame. - } + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + // Tear down temporary frame. + __ LeaveInternalFrame(); // Get the full codegen state from the stack and untag it. __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); @@ -645,10 +623,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ Pushad(); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } + __ EnterInternalFrame(); + __ CallRuntime(Runtime::kNotifyOSR, 0); + __ LeaveInternalFrame(); __ Popad(); __ ret(0); } @@ -718,21 +695,18 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ j(above_equal, &shift_arguments); __ bind(&convert_to_object); - { - // Enter an internal frame in order to preserve argument count. - FrameScope scope(masm, StackFrame::INTERNAL); - __ Integer32ToSmi(rax, rax); - __ push(rax); - - __ push(rbx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ movq(rbx, rax); - __ Set(rdx, 0); // indicate regular JS_FUNCTION + __ EnterInternalFrame(); // In order to preserve argument count. + __ Integer32ToSmi(rax, rax); + __ push(rax); - __ pop(rax); - __ SmiToInteger32(rax, rax); - } + __ push(rbx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ movq(rbx, rax); + __ Set(rdx, 0); // indicate regular JS_FUNCTION + __ pop(rax); + __ SmiToInteger32(rax, rax); + __ LeaveInternalFrame(); // Restore the function to rdi. 
__ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize)); __ jmp(&patch_receiver, Label::kNear); @@ -833,162 +807,160 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // rsp+8: arguments // rsp+16: receiver ("this") // rsp+24: function - { - FrameScope frame_scope(masm, StackFrame::INTERNAL); - // Stack frame: - // rbp: Old base pointer - // rbp[1]: return address - // rbp[2]: function arguments - // rbp[3]: receiver - // rbp[4]: function - static const int kArgumentsOffset = 2 * kPointerSize; - static const int kReceiverOffset = 3 * kPointerSize; - static const int kFunctionOffset = 4 * kPointerSize; - - __ push(Operand(rbp, kFunctionOffset)); - __ push(Operand(rbp, kArgumentsOffset)); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); - __ movq(rcx, rsp); - // Make rcx the space we have left. The stack might already be overflowed - // here which will cause rcx to become negative. - __ subq(rcx, kScratchRegister); - // Make rdx the space we need for the array when it is unrolled onto the - // stack. - __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); - // Check if the arguments will overflow the stack. - __ cmpq(rcx, rdx); - __ j(greater, &okay); // Signed comparison. - - // Out of stack space. - __ push(Operand(rbp, kFunctionOffset)); - __ push(rax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - __ bind(&okay); - // End of stack check. - - // Push current index and limit. - const int kLimitOffset = - StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; - const int kIndexOffset = kLimitOffset - 1 * kPointerSize; - __ push(rax); // limit - __ push(Immediate(0)); // index - - // Get the receiver. - __ movq(rbx, Operand(rbp, kReceiverOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ movq(rdi, Operand(rbp, kFunctionOffset)); - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &push_receiver); - - // Change context eagerly to get the right global object if necessary. - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - - // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; - __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset), - Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); - __ j(not_equal, &push_receiver); - - // Do not transform the receiver for natives. - __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset), - Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); - __ j(not_equal, &push_receiver); - - // Compute the receiver in non-strict mode. - __ JumpIfSmi(rbx, &call_to_object, Label::kNear); - __ CompareRoot(rbx, Heap::kNullValueRootIndex); - __ j(equal, &use_global_receiver); - __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); - __ j(equal, &use_global_receiver); - - // If given receiver is already a JavaScript object then there's no - // reason for converting it. - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, &push_receiver); - - // Convert the receiver to an object. 
- __ bind(&call_to_object); - __ push(rbx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ movq(rbx, rax); - __ jmp(&push_receiver, Label::kNear); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ movq(rbx, FieldOperand(rsi, kGlobalOffset)); - __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset)); - __ movq(rbx, FieldOperand(rbx, kGlobalOffset)); - __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); + __ EnterInternalFrame(); + // Stack frame: + // rbp: Old base pointer + // rbp[1]: return address + // rbp[2]: function arguments + // rbp[3]: receiver + // rbp[4]: function + static const int kArgumentsOffset = 2 * kPointerSize; + static const int kReceiverOffset = 3 * kPointerSize; + static const int kFunctionOffset = 4 * kPointerSize; + + __ push(Operand(rbp, kFunctionOffset)); + __ push(Operand(rbp, kArgumentsOffset)); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); + __ movq(rcx, rsp); + // Make rcx the space we have left. The stack might already be overflowed + // here which will cause rcx to become negative. + __ subq(rcx, kScratchRegister); + // Make rdx the space we need for the array when it is unrolled onto the + // stack. + __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); + // Check if the arguments will overflow the stack. + __ cmpq(rcx, rdx); + __ j(greater, &okay); // Signed comparison. + + // Out of stack space. + __ push(Operand(rbp, kFunctionOffset)); + __ push(rax); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ bind(&okay); + // End of stack check. + + // Push current index and limit. + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; + const int kIndexOffset = kLimitOffset - 1 * kPointerSize; + __ push(rax); // limit + __ push(Immediate(0)); // index + + // Get the receiver. + __ movq(rbx, Operand(rbp, kReceiverOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ movq(rdi, Operand(rbp, kFunctionOffset)); + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &push_receiver); - // Push the receiver. - __ bind(&push_receiver); - __ push(rbx); + // Change context eagerly to get the right global object if necessary. + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - // Copy all arguments from the array to the stack. - Label entry, loop; - __ movq(rax, Operand(rbp, kIndexOffset)); - __ jmp(&entry); - __ bind(&loop); - __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments - - // Use inline caching to speed up access to arguments. - Handle<Code> ic = - masm->isolate()->builtins()->KeyedLoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET); - // It is important that we do not have a test instruction after the - // call. A test instruction after the call is used to indicate that - // we have generated an inline version of the keyed load. In this - // case, we know that we are not generating a test instruction next. - - // Push the nth argument. - __ push(rax); + // Do not transform the receiver for strict mode functions. 
+ Label call_to_object, use_global_receiver; + __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset), + Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); + __ j(not_equal, &push_receiver); + + // Do not transform the receiver for natives. + __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset), + Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); + __ j(not_equal, &push_receiver); + + // Compute the receiver in non-strict mode. + __ JumpIfSmi(rbx, &call_to_object, Label::kNear); + __ CompareRoot(rbx, Heap::kNullValueRootIndex); + __ j(equal, &use_global_receiver); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); + __ j(equal, &use_global_receiver); + + // If given receiver is already a JavaScript object then there's no + // reason for converting it. + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(above_equal, &push_receiver); + + // Convert the receiver to an object. + __ bind(&call_to_object); + __ push(rbx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ movq(rbx, rax); + __ jmp(&push_receiver, Label::kNear); + + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ movq(rbx, FieldOperand(rsi, kGlobalOffset)); + __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset)); + __ movq(rbx, FieldOperand(rbx, kGlobalOffset)); + __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + __ bind(&push_receiver); + __ push(rbx); + + // Copy all arguments from the array to the stack. + Label entry, loop; + __ movq(rax, Operand(rbp, kIndexOffset)); + __ jmp(&entry); + __ bind(&loop); + __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments + + // Use inline caching to speed up access to arguments. + Handle<Code> ic = + masm->isolate()->builtins()->KeyedLoadIC_Initialize(); + __ Call(ic, RelocInfo::CODE_TARGET); + // It is important that we do not have a test instruction after the + // call. A test instruction after the call is used to indicate that + // we have generated an inline version of the keyed load. In this + // case, we know that we are not generating a test instruction next. + + // Push the nth argument. + __ push(rax); - // Update the index on the stack and in register rax. - __ movq(rax, Operand(rbp, kIndexOffset)); - __ SmiAddConstant(rax, rax, Smi::FromInt(1)); - __ movq(Operand(rbp, kIndexOffset), rax); + // Update the index on the stack and in register rax. + __ movq(rax, Operand(rbp, kIndexOffset)); + __ SmiAddConstant(rax, rax, Smi::FromInt(1)); + __ movq(Operand(rbp, kIndexOffset), rax); - __ bind(&entry); - __ cmpq(rax, Operand(rbp, kLimitOffset)); - __ j(not_equal, &loop); + __ bind(&entry); + __ cmpq(rax, Operand(rbp, kLimitOffset)); + __ j(not_equal, &loop); - // Invoke the function. - Label call_proxy; - ParameterCount actual(rax); - __ SmiToInteger32(rax, rax); - __ movq(rdi, Operand(rbp, kFunctionOffset)); - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &call_proxy); - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Invoke the function. 
+ Label call_proxy; + ParameterCount actual(rax); + __ SmiToInteger32(rax, rax); + __ movq(rdi, Operand(rbp, kFunctionOffset)); + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &call_proxy); + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); - frame_scope.GenerateLeaveFrame(); - __ ret(3 * kPointerSize); // remove this, receiver, and arguments + __ LeaveInternalFrame(); + __ ret(3 * kPointerSize); // remove this, receiver, and arguments - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(rdi); // add function proxy as last argument - __ incq(rax); - __ Set(rbx, 0); - __ SetCallKind(rcx, CALL_AS_METHOD); - __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); - __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(rdi); // add function proxy as last argument + __ incq(rax); + __ Set(rbx, 0); + __ SetCallKind(rcx, CALL_AS_METHOD); + __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); + __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); - // Leave internal frame. - } + __ LeaveInternalFrame(); __ ret(3 * kPointerSize); // remove this, receiver, and arguments } @@ -1548,11 +1520,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Pass the function to optimize as the argument to the on-stack // replacement runtime function. - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(rax); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - } + __ EnterInternalFrame(); + __ push(rax); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + __ LeaveInternalFrame(); // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. @@ -1570,9 +1541,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { StackCheckStub stub; __ TailCallStub(&stub); - if (FLAG_debug_code) { - __ Abort("Unreachable code: returned from tail call."); - } + __ Abort("Unreachable code: returned from tail call."); __ bind(&ok); __ ret(0); diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 7d41ffe53..df4438b73 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -155,70 +155,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } -void FastNewBlockContextStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [rsp + (1 * kPointerSize)]: function - // [rsp + (2 * kPointerSize)]: serialized scope info - - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - rax, rbx, rcx, &gc, TAG_OBJECT); - - // Get the function from the stack. - __ movq(rcx, Operand(rsp, 1 * kPointerSize)); - - // Get the serialized scope info from the stack. - __ movq(rbx, Operand(rsp, 2 * kPointerSize)); - - // Setup the object header. - __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex); - __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); - __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); - - // If this block context is nested in the global context we get a smi - // sentinel instead of a function. The block context should get the - // canonical empty function of the global context as its closure which - // we still have to look up. 
- Label after_sentinel; - __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear); - if (FLAG_debug_code) { - const char* message = "Expected 0 as a Smi sentinel"; - __ cmpq(rcx, Immediate(0)); - __ Assert(equal, message); - } - __ movq(rcx, GlobalObjectOperand()); - __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); - __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX)); - __ bind(&after_sentinel); - - // Setup the fixed slots. - __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx); - __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi); - __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx); - - // Copy the global object from the previous context. - __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX)); - __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx); - - // Initialize the rest of the slots to the hole value. - __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex); - for (int i = 0; i < slots_; i++) { - __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx); - } - - // Return and remove the on-stack parameter. - __ movq(rsi, rax); - __ ret(2 * kPointerSize); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); -} - - void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -297,8 +233,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. Label patch; const Register argument = rax; const Register map = rdx; @@ -394,25 +328,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } -void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { - __ PushCallerSaved(save_doubles_); - const int argument_count = 1; - __ PrepareCallCFunction(argument_count); -#ifdef _WIN64 - __ LoadAddress(rcx, ExternalReference::isolate_address()); -#else - __ LoadAddress(rdi, ExternalReference::isolate_address()); -#endif - - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::store_buffer_overflow_function(masm->isolate()), - argument_count); - __ PopCallerSaved(save_doubles_); - __ ret(0); -} - - void ToBooleanStub::CheckOddball(MacroAssembler* masm, Type type, Heap::RootListIndex value, @@ -707,13 +622,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(rax); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ movq(rcx, rax); - __ pop(rax); - } + __ EnterInternalFrame(); + __ push(rax); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ movq(rcx, rax); + __ pop(rax); + __ LeaveInternalFrame(); __ bind(&heapnumber_allocated); // rcx: allocated 'empty' number @@ -837,10 +751,6 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. 
- AllowStubCallsScope allow_stub_calls(masm, true); - switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -1543,12 +1453,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ addq(rsp, Immediate(kDoubleSize)); // We return the value in xmm1 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - { - FrameScope scope(masm, StackFrame::INTERNAL); - // Allocate an unused object bigger than a HeapNumber. - __ Push(Smi::FromInt(2 * kDoubleSize)); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - } + __ EnterInternalFrame(); + // Allocate an unused object bigger than a HeapNumber. + __ Push(Smi::FromInt(2 * kDoubleSize)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + __ LeaveInternalFrame(); __ Ret(); } @@ -1564,11 +1473,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&runtime_call); __ AllocateHeapNumber(rax, rdi, &skip_cache); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(rax); - __ CallRuntime(RuntimeFunction(), 1); - } + __ EnterInternalFrame(); + __ push(rax); + __ CallRuntime(RuntimeFunction(), 1); + __ LeaveInternalFrame(); __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2438,6 +2346,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } // Stack frame on entry. // rsp[0]: return address @@ -2758,18 +2670,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Store last subject and last input. __ movq(rax, Operand(rsp, kSubjectOffset)); __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); - __ RecordWriteField(rbx, - RegExpImpl::kLastSubjectOffset, - rax, - rdi, - kDontSaveFPRegs); + __ movq(rcx, rbx); + __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi); __ movq(rax, Operand(rsp, kSubjectOffset)); __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax); - __ RecordWriteField(rbx, - RegExpImpl::kLastInputOffset, - rax, - rdi, - kDontSaveFPRegs); + __ movq(rcx, rbx); + __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi); // Get the static offsets vector filled by the native regexp code. __ LoadAddress(rcx, @@ -3325,22 +3231,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } -void CallFunctionStub::FinishCode(Code* code) { - code->set_has_function_cache(false); -} - - -void CallFunctionStub::Clear(Heap* heap, Address address) { - UNREACHABLE(); -} - - -Object* CallFunctionStub::GetCachedValue(Address address) { - UNREACHABLE(); - return NULL; -} - - void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -3429,35 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() { } -bool CEntryStub::IsPregenerated() { -#ifdef _WIN64 - return result_size_ == 1; -#else - return true; -#endif -} - - -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); - // It is important that the store buffer overflow stubs are generated first. 
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); -} - - -void CodeStub::GenerateFPStubs() { -} - - -void CEntryStub::GenerateAheadOfTime() { - CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - CEntryStub save_doubles(1, kSaveFPRegs); - save_doubles.GetCode()->set_is_pregenerated(true); -} - - void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { // Throw exception in eax. __ Throw(rax); @@ -3896,7 +3757,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); } else { - // Get return address and delta to inlined map check. __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax); @@ -3931,11 +3791,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); } else { // Store offset of true in the root array at the inline check site. - int true_offset = 0x100 + - (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; - // Assert it is a 1-byte signed value. - ASSERT(true_offset >= 0 && true_offset < 0x100); - __ movl(rax, Immediate(true_offset)); + ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias + == 0xB0 - 0x100); + __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize. __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); @@ -3954,11 +3812,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); } else { // Store offset of false in the root array at the inline check site. - int false_offset = 0x100 + - (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; - // Assert it is a 1-byte signed value. - ASSERT(false_offset >= 0 && false_offset < 0x100); - __ movl(rax, Immediate(false_offset)); + ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias + == 0xB8 - 0x100); + __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize. __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); @@ -4080,23 +3936,22 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Heap::kEmptyStringRootIndex); __ j(not_equal, &call_runtime_); // Get the first of the two strings and load its instance type. - ASSERT(!kScratchRegister.is(scratch_)); - __ movq(kScratchRegister, FieldOperand(object_, ConsString::kFirstOffset)); + __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string, Label::kNear); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset)); - __ movq(kScratchRegister, FieldOperand(object_, SlicedString::kParentOffset)); + __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset)); __ bind(&assure_seq_string); - __ movq(result_, FieldOperand(kScratchRegister, HeapObject::kMapOffset)); + __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); // If the first cons component is also non-flat, then go to runtime. 
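The InstanceofStub hunk above replaces a computed one-byte offset into the root array with the hard-coded 0xB0/0xB8 bytes that 3.6.4 asserts against. The arithmetic behind those constants, spelled out with example values (kRootRegisterBias and the root indices below are assumptions chosen to be consistent with the ASSERTs, not authoritative definitions):

#include <cassert>
#include <cstdio>

int main() {
  const int kPointerSizeLog2 = 3;       // 64-bit pointers
  const int kRootRegisterBias = 128;    // assumed x64 bias
  const int kTrueValueRootIndex = 6;    // example indices consistent with the
  const int kFalseValueRootIndex = 7;   // ASSERTs in the hunk above

  // One-byte value written at the inline check site; the 0x100 bias lets the
  // removed code assert that the result fits in a signed byte.
  int true_offset = 0x100 +
      (kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
  int false_offset = 0x100 +
      (kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
  assert(true_offset == 0xB0 && false_offset == 0xB8);
  std::printf("true: 0x%X  false: 0x%X\n",
              static_cast<unsigned>(true_offset),
              static_cast<unsigned>(false_offset));
}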
STATIC_ASSERT(kSeqStringTag == 0); __ testb(result_, Immediate(kStringRepresentationMask)); __ j(not_zero, &call_runtime_); - __ movq(object_, kScratchRegister); + __ jmp(&flat_string); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -5416,13 +5271,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(rdx); - __ push(rax); - __ Push(Smi::FromInt(op_)); - __ CallExternalReference(miss, 3); - } + __ EnterInternalFrame(); + __ push(rdx); + __ push(rax); + __ Push(Smi::FromInt(op_)); + __ CallExternalReference(miss, 3); + __ LeaveInternalFrame(); // Compute the entry point of the rewritten stub. __ lea(rdi, FieldOperand(rax, Code::kHeaderSize)); @@ -5553,8 +5407,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: // esp[0 * kPointerSize]: return address. // esp[1 * kPointerSize]: key's hash. @@ -5640,279 +5492,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } -struct AheadOfTimeWriteBarrierStubList { - Register object, value, address; - RememberedSetAction action; -}; - - -struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { - // Used in RegExpExecStub. - { rbx, rax, rdi, EMIT_REMEMBERED_SET }, - // Used in CompileArrayPushCall. - { rbx, rcx, rdx, EMIT_REMEMBERED_SET }, - // Used in CompileStoreGlobal. - { rbx, rcx, rdx, OMIT_REMEMBERED_SET }, - // Used in StoreStubCompiler::CompileStoreField and - // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. - { rdx, rcx, rbx, EMIT_REMEMBERED_SET }, - // GenerateStoreField calls the stub with two different permutations of - // registers. This is the second. - { rbx, rcx, rdx, EMIT_REMEMBERED_SET }, - // StoreIC::GenerateNormal via GenerateDictionaryStore. - { rbx, r8, r9, EMIT_REMEMBERED_SET }, - // KeyedStoreIC::GenerateGeneric. - { rbx, rdx, rcx, EMIT_REMEMBERED_SET}, - // KeyedStoreStubCompiler::GenerateStoreFastElement. - { rdi, rdx, rcx, EMIT_REMEMBERED_SET}, - // Null termination. - { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} -}; - - -bool RecordWriteStub::IsPregenerated() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - if (object_.is(entry->object) && - value_.is(entry->value) && - address_.is(entry->address) && - remembered_set_action_ == entry->action && - save_fp_regs_mode_ == kDontSaveFPRegs) { - return true; - } - } - return false; -} - - -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); - StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode()->set_is_pregenerated(true); -} - - -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { - for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; - !entry->object.is(no_reg); - entry++) { - RecordWriteStub stub(entry->object, - entry->value, - entry->address, - entry->action, - kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - } -} - - -// Takes the input in 3 registers: address_ value_ and object_. 
A pointer to -// the value has just been written into the object, now this stub makes sure -// we keep the GC informed. The word in the object where the value has been -// written is in the address register. -void RecordWriteStub::Generate(MacroAssembler* masm) { - Label skip_to_incremental_noncompacting; - Label skip_to_incremental_compacting; - - // The first two instructions are generated with labels so as to get the - // offset fixed up correctly by the bind(Label*) call. We patch it back and - // forth between a compare instructions (a nop in this position) and the - // real branch when we start and stop incremental heap marking. - // See RecordWriteStub::Patch for details. - __ jmp(&skip_to_incremental_noncompacting, Label::kNear); - __ jmp(&skip_to_incremental_compacting, Label::kFar); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ ret(0); - } - - __ bind(&skip_to_incremental_noncompacting); - GenerateIncremental(masm, INCREMENTAL); - - __ bind(&skip_to_incremental_compacting); - GenerateIncremental(masm, INCREMENTAL_COMPACTION); - - // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. - // Will be checked in IncrementalMarking::ActivateGeneratedStub. - masm->set_byte_at(0, kTwoByteNopInstruction); - masm->set_byte_at(2, kFiveByteNopInstruction); -} - - -void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { - regs_.Save(masm); - - if (remembered_set_action_ == EMIT_REMEMBERED_SET) { - Label dont_need_remembered_set; - - __ movq(regs_.scratch0(), Operand(regs_.address(), 0)); - __ JumpIfNotInNewSpace(regs_.scratch0(), - regs_.scratch0(), - &dont_need_remembered_set); - - __ CheckPageFlag(regs_.object(), - regs_.scratch0(), - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - not_zero, - &dont_need_remembered_set); - - // First notify the incremental marker if necessary, then update the - // remembered set. - CheckNeedsToInformIncrementalMarker( - masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - - __ bind(&dont_need_remembered_set); - } - - CheckNeedsToInformIncrementalMarker( - masm, kReturnOnNoNeedToInformIncrementalMarker, mode); - InformIncrementalMarker(masm, mode); - regs_.Restore(masm); - __ ret(0); -} - - -void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { - regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); -#ifdef _WIN64 - Register arg3 = r8; - Register arg2 = rdx; - Register arg1 = rcx; -#else - Register arg3 = rdx; - Register arg2 = rsi; - Register arg1 = rdi; -#endif - Register address = - arg1.is(regs_.address()) ? kScratchRegister : regs_.address(); - ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(arg1)); - __ Move(address, regs_.address()); - __ Move(arg1, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - // TODO(gc) Can we just set address arg2 in the beginning? 
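The #ifdef _WIN64 blocks in the stubs above exist because the two x64 C calling conventions pass the first integer arguments in different registers, which matters whenever a stub calls out to a C function. A small reference sketch with the register names as plain strings:

#include <array>
#include <cstdio>

// First three integer/pointer argument registers per ABI, matching the
// arg1/arg2/arg3 choices in the removed code above.
constexpr std::array<const char*, 3> kWin64Args = {"rcx", "rdx", "r8"};
constexpr std::array<const char*, 3> kSysVArgs  = {"rdi", "rsi", "rdx"};

int main() {
  for (int i = 0; i < 3; ++i) {
    std::printf("arg%d: Win64=%s  System V=%s\n",
                i + 1, kWin64Args[i], kSysVArgs[i]);
  }
}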
- __ Move(arg2, address); - } else { - ASSERT(mode == INCREMENTAL); - __ movq(arg2, Operand(address, 0)); - } - __ LoadAddress(arg3, ExternalReference::isolate_address()); - int argument_count = 3; - - AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(argument_count); - if (mode == INCREMENTAL_COMPACTION) { - __ CallCFunction( - ExternalReference::incremental_evacuation_record_write_function( - masm->isolate()), - argument_count); - } else { - ASSERT(mode == INCREMENTAL); - __ CallCFunction( - ExternalReference::incremental_marking_record_write_function( - masm->isolate()), - argument_count); - } - regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); -} - - -void RecordWriteStub::CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode) { - Label on_black; - Label need_incremental; - Label need_incremental_pop_object; - - // Let's look at the color of the object: If it is not black we don't have - // to inform the incremental marker. - __ JumpIfBlack(regs_.object(), - regs_.scratch0(), - regs_.scratch1(), - &on_black, - Label::kNear); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ ret(0); - } - - __ bind(&on_black); - - // Get the value from the slot. - __ movq(regs_.scratch0(), Operand(regs_.address(), 0)); - - if (mode == INCREMENTAL_COMPACTION) { - Label ensure_not_white; - - __ CheckPageFlag(regs_.scratch0(), // Contains value. - regs_.scratch1(), // Scratch. - MemoryChunk::kEvacuationCandidateMask, - zero, - &ensure_not_white, - Label::kNear); - - __ CheckPageFlag(regs_.object(), - regs_.scratch1(), // Scratch. - MemoryChunk::kSkipEvacuationSlotsRecordingMask, - zero, - &need_incremental); - - __ bind(&ensure_not_white); - } - - // We need an extra register for this, so we push the object register - // temporarily. - __ push(regs_.object()); - __ EnsureNotWhite(regs_.scratch0(), // The value. - regs_.scratch1(), // Scratch. - regs_.object(), // Scratch. - &need_incremental_pop_object, - Label::kNear); - __ pop(regs_.object()); - - regs_.Restore(masm); - if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { - __ RememberedSetHelper(object_, - address_, - value_, - save_fp_regs_mode_, - MacroAssembler::kReturnAtEnd); - } else { - __ ret(0); - } - - __ bind(&need_incremental_pop_object); - __ pop(regs_.object()); - - __ bind(&need_incremental); - - // Fall through when we need to inform the incremental marker. -} - - #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 698ba403c..4058118ee 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -59,32 +59,6 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { - public: - explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } - - void Generate(MacroAssembler* masm); - - virtual bool IsPregenerated() { return true; } - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - private: - SaveFPRegsMode save_doubles_; - - Major MajorKey() { return StoreBufferOverflow; } - int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 
1 : 0; } -}; - - -// Flag that indicates how to generate code for the stub GenericBinaryOpStub. -enum GenericBinaryFlags { - NO_GENERIC_BINARY_FLAGS = 0, - NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. -}; - - class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -439,8 +413,6 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); - virtual bool SometimesSetsUpAFrame() { return false; } - private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -453,7 +425,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -474,253 +446,6 @@ class StringDictionaryLookupStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { - public: - RecordWriteStub(Register object, - Register value, - Register address, - RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode) - : object_(object), - value_(value), - address_(address), - remembered_set_action_(remembered_set_action), - save_fp_regs_mode_(fp_mode), - regs_(object, // An input reg. - address, // An input reg. - value) { // One scratch reg. - } - - enum Mode { - STORE_BUFFER_ONLY, - INCREMENTAL, - INCREMENTAL_COMPACTION - }; - - virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); - virtual bool SometimesSetsUpAFrame() { return false; } - - static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. - static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8. - - static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32. - static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32. - - static Mode GetMode(Code* stub) { - byte first_instruction = stub->instruction_start()[0]; - byte second_instruction = stub->instruction_start()[2]; - - if (first_instruction == kTwoByteJumpInstruction) { - return INCREMENTAL; - } - - ASSERT(first_instruction == kTwoByteNopInstruction); - - if (second_instruction == kFiveByteJumpInstruction) { - return INCREMENTAL_COMPACTION; - } - - ASSERT(second_instruction == kFiveByteNopInstruction); - - return STORE_BUFFER_ONLY; - } - - static void Patch(Code* stub, Mode mode) { - switch (mode) { - case STORE_BUFFER_ONLY: - ASSERT(GetMode(stub) == INCREMENTAL || - GetMode(stub) == INCREMENTAL_COMPACTION); - stub->instruction_start()[0] = kTwoByteNopInstruction; - stub->instruction_start()[2] = kFiveByteNopInstruction; - break; - case INCREMENTAL: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - stub->instruction_start()[0] = kTwoByteJumpInstruction; - break; - case INCREMENTAL_COMPACTION: - ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); - stub->instruction_start()[0] = kTwoByteNopInstruction; - stub->instruction_start()[2] = kFiveByteJumpInstruction; - break; - } - ASSERT(GetMode(stub) == mode); - CPU::FlushICache(stub->instruction_start(), 7); - } - - private: - // This is a helper class for freeing up 3 scratch registers, where the third - // is always rcx (needed for shift operations). The input is two registers - // that must be preserved and one scratch register provided by the caller. 
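The removed RecordWriteStub declared above is mode-switched by rewriting its first bytes in place: two cmp-as-nop instructions mean store-buffer-only, and either one can be flipped to a jump to enable an incremental-marking path. A stand-alone sketch of the same GetMode/Patch logic over a raw byte buffer, with opcode constants mirroring the ones declared above:

#include <cassert>
#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

const uint8_t kTwoByteNop   = 0x3c;  // cmpb al, imm8 (consumes 1 operand byte)
const uint8_t kTwoByteJump  = 0xeb;  // jmp rel8
const uint8_t kFiveByteNop  = 0x3d;  // cmpl eax, imm32 (consumes 4 operand bytes)
const uint8_t kFiveByteJump = 0xe9;  // jmp rel32

Mode GetMode(const uint8_t* code) {
  if (code[0] == kTwoByteJump) return INCREMENTAL;
  assert(code[0] == kTwoByteNop);
  if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
  assert(code[2] == kFiveByteNop);
  return STORE_BUFFER_ONLY;
}

void Patch(uint8_t* code, Mode mode) {
  switch (mode) {
    case STORE_BUFFER_ONLY:      code[0] = kTwoByteNop;  code[2] = kFiveByteNop;  break;
    case INCREMENTAL:            code[0] = kTwoByteJump;                          break;
    case INCREMENTAL_COMPACTION: code[0] = kTwoByteNop;  code[2] = kFiveByteJump; break;
  }
  assert(GetMode(code) == mode);
}

int main() {
  // First bytes of a freshly generated stub: both patch points start as nops.
  uint8_t code[7] = {kTwoByteNop, 0, kFiveByteNop, 0, 0, 0, 0};
  assert(GetMode(code) == STORE_BUFFER_ONLY);
  Patch(code, INCREMENTAL_COMPACTION);
  assert(GetMode(code) == INCREMENTAL_COMPACTION);
  Patch(code, STORE_BUFFER_ONLY);
  Patch(code, INCREMENTAL);
  assert(GetMode(code) == INCREMENTAL);
}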
- class RegisterAllocation { - public: - RegisterAllocation(Register object, - Register address, - Register scratch0) - : object_orig_(object), - address_orig_(address), - scratch0_orig_(scratch0), - object_(object), - address_(address), - scratch0_(scratch0) { - ASSERT(!AreAliased(scratch0, object, address, no_reg)); - scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_); - if (scratch0.is(rcx)) { - scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_); - } - if (object.is(rcx)) { - object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_); - } - if (address.is(rcx)) { - address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_); - } - ASSERT(!AreAliased(scratch0_, object_, address_, rcx)); - } - - void Save(MacroAssembler* masm) { - ASSERT(!address_orig_.is(object_)); - ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); - ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); - ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); - // We don't have to save scratch0_orig_ because it was given to us as - // a scratch register. But if we had to switch to a different reg then - // we should save the new scratch0_. - if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_); - if (!rcx.is(scratch0_orig_) && - !rcx.is(object_orig_) && - !rcx.is(address_orig_)) { - masm->push(rcx); - } - masm->push(scratch1_); - if (!address_.is(address_orig_)) { - masm->push(address_); - masm->movq(address_, address_orig_); - } - if (!object_.is(object_orig_)) { - masm->push(object_); - masm->movq(object_, object_orig_); - } - } - - void Restore(MacroAssembler* masm) { - // These will have been preserved the entire time, so we just need to move - // them back. Only in one case is the orig_ reg different from the plain - // one, since only one of them can alias with rcx. - if (!object_.is(object_orig_)) { - masm->movq(object_orig_, object_); - masm->pop(object_); - } - if (!address_.is(address_orig_)) { - masm->movq(address_orig_, address_); - masm->pop(address_); - } - masm->pop(scratch1_); - if (!rcx.is(scratch0_orig_) && - !rcx.is(object_orig_) && - !rcx.is(address_orig_)) { - masm->pop(rcx); - } - if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_); - } - - // If we have to call into C then we need to save and restore all caller- - // saved registers that were not already preserved. - - // The three scratch registers (incl. rcx) will be restored by other means - // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee - // save and don't need to be preserved. - void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { - masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx); - } - - inline void RestoreCallerSaveRegisters(MacroAssembler*masm, - SaveFPRegsMode mode) { - masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx); - } - - inline Register object() { return object_; } - inline Register address() { return address_; } - inline Register scratch0() { return scratch0_; } - inline Register scratch1() { return scratch1_; } - - private: - Register object_orig_; - Register address_orig_; - Register scratch0_orig_; - Register object_; - Register address_; - Register scratch0_; - Register scratch1_; - // Third scratch register is always rcx. 
- - Register GetRegThatIsNotRcxOr(Register r1, - Register r2, - Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { - Register candidate = Register::FromAllocationIndex(i); - if (candidate.is(rcx)) continue; - if (candidate.is(r1)) continue; - if (candidate.is(r2)) continue; - if (candidate.is(r3)) continue; - return candidate; - } - UNREACHABLE(); - return no_reg; - } - friend class RecordWriteStub; - }; - - enum OnNoNeedToInformIncrementalMarker { - kReturnOnNoNeedToInformIncrementalMarker, - kUpdateRememberedSetOnNoNeedToInformIncrementalMarker - }; - - void Generate(MacroAssembler* masm); - void GenerateIncremental(MacroAssembler* masm, Mode mode); - void CheckNeedsToInformIncrementalMarker( - MacroAssembler* masm, - OnNoNeedToInformIncrementalMarker on_no_need, - Mode mode); - void InformIncrementalMarker(MacroAssembler* masm, Mode mode); - - Major MajorKey() { return RecordWrite; } - - int MinorKey() { - return ObjectBits::encode(object_.code()) | - ValueBits::encode(value_.code()) | - AddressBits::encode(address_.code()) | - RememberedSetActionBits::encode(remembered_set_action_) | - SaveFPRegsModeBits::encode(save_fp_regs_mode_); - } - - bool MustBeInStubCache() { - // All stubs must be registered in the stub cache - // otherwise IncrementalMarker would not be able to find - // and patch it. - return true; - } - - void Activate(Code* code) { - code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); - } - - class ObjectBits: public BitField<int, 0, 4> {}; - class ValueBits: public BitField<int, 4, 4> {}; - class AddressBits: public BitField<int, 8, 4> {}; - class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; - class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; - - Register object_; - Register value_; - Register address_; - RememberedSetAction remembered_set_action_; - SaveFPRegsMode save_fp_regs_mode_; - Label slow_; - RegisterAllocation regs_; -}; - - } } // namespace v8::internal #endif // V8_X64_CODE_STUBS_X64_H_ diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index f6102c7c7..507bbd44c 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -38,16 +38,12 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterFrame(StackFrame::INTERNAL); - ASSERT(!masm->has_frame()); - masm->set_has_frame(true); + masm->EnterInternalFrame(); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveFrame(StackFrame::INTERNAL); - ASSERT(masm->has_frame()); - masm->set_has_frame(false); + masm->LeaveInternalFrame(); } diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc index 2149fc2d1..423e6f244 100644 --- a/deps/v8/src/x64/debug-x64.cc +++ b/deps/v8/src/x64/debug-x64.cc @@ -100,66 +100,65 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList non_object_regs, bool convert_call_to_jmp) { // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as as two smis causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - ASSERT(!reg.is(kScratchRegister)); - if ((object_regs & (1 << r)) != 0) { - __ push(reg); - } - // Store the 64-bit value as two smis. - if ((non_object_regs & (1 << r)) != 0) { - __ movq(kScratchRegister, reg); - __ Integer32ToSmi(reg, reg); - __ push(reg); - __ sar(kScratchRegister, Immediate(32)); - __ Integer32ToSmi(kScratchRegister, kScratchRegister); - __ push(kScratchRegister); - } + __ EnterInternalFrame(); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as as two smis causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + ASSERT(!reg.is(kScratchRegister)); + if ((object_regs & (1 << r)) != 0) { + __ push(reg); } + // Store the 64-bit value as two smis. + if ((non_object_regs & (1 << r)) != 0) { + __ movq(kScratchRegister, reg); + __ Integer32ToSmi(reg, reg); + __ push(reg); + __ sar(kScratchRegister, Immediate(32)); + __ Integer32ToSmi(kScratchRegister, kScratchRegister); + __ push(kScratchRegister); + } + } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ Set(rax, 0); // No arguments (argc == 0). - __ movq(rbx, ExternalReference::debug_break(masm->isolate())); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if (FLAG_debug_code) { - __ Set(reg, kDebugZapValue); - } - if ((object_regs & (1 << r)) != 0) { - __ pop(reg); - } - // Reconstruct the 64-bit value from two smis. - if ((non_object_regs & (1 << r)) != 0) { - __ pop(kScratchRegister); - __ SmiToInteger32(kScratchRegister, kScratchRegister); - __ shl(kScratchRegister, Immediate(32)); - __ pop(reg); - __ SmiToInteger32(reg, reg); - __ or_(reg, kScratchRegister); - } + __ Set(rax, 0); // No arguments (argc == 0). + __ movq(rbx, ExternalReference::debug_break(masm->isolate())); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Set(reg, kDebugZapValue); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + // Reconstruct the 64-bit value from two smis. + if ((non_object_regs & (1 << r)) != 0) { + __ pop(kScratchRegister); + __ SmiToInteger32(kScratchRegister, kScratchRegister); + __ shl(kScratchRegister, Immediate(32)); + __ pop(reg); + __ SmiToInteger32(reg, reg); + __ or_(reg, kScratchRegister); } - - // Get rid of the internal frame. } + // Get rid of the internal frame. + __ LeaveInternalFrame(); + // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. 
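Generate_DebugBreakCallHelper above keeps raw 64-bit register values alive across the runtime call by pushing them as two smis, so the GC never mistakes them for heap pointers. On x64 a smi carries its 32-bit payload in the upper half of the word with the low bits clear; a stand-alone sketch of the split and reconstruction that the Integer32ToSmi / sar / shl / or sequence performs:

#include <cassert>
#include <cstdint>

// Tag a 32-bit integer as an x64-style smi and back (payload in the upper
// 32 bits, tag bits zero).
inline uint64_t Integer32ToSmi(uint32_t v) { return static_cast<uint64_t>(v) << 32; }
inline uint32_t SmiToInteger32(uint64_t smi) { return static_cast<uint32_t>(smi >> 32); }

int main() {
  uint64_t raw = 0x123456789abcdef0ull;  // arbitrary non-pointer register value
  // Save: split into two GC-safe smis (mirrors the two pushes above).
  uint64_t low_smi  = Integer32ToSmi(static_cast<uint32_t>(raw));
  uint64_t high_smi = Integer32ToSmi(static_cast<uint32_t>(raw >> 32));
  // Restore: untag both halves and recombine (mirrors the pops, shl and or).
  uint64_t restored = (static_cast<uint64_t>(SmiToInteger32(high_smi)) << 32) |
                      SmiToInteger32(low_smi);
  assert(restored == raw);
}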
if (convert_call_to_jmp) { diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index b7e334ee7..b52e65932 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -197,19 +197,13 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // Destroy the code which is not supposed to run again. ZapCodeRange(previous_pc, jump_table_address); #endif - Isolate* isolate = code->GetIsolate(); // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = isolate->deoptimizer_data(); + DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; - // We might be in the middle of incremental marking with compaction. - // Tell collector to treat this code object in a special way and - // ignore all slots that might have been recorded on it. - isolate->heap()->mark_compact_collector()->InvalidateCode(code); - // Set the code for the function to non-optimized version. function->ReplaceCode(function->shared()->code()); @@ -226,8 +220,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, - Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, Code* check_code, Code* replacement_code) { Address call_target_address = pc_after - kIntSize; @@ -257,13 +250,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, *(call_target_address - 2) = 0x90; // nop Assembler::set_target_address_at(call_target_address, replacement_code->entry()); - - RelocInfo rinfo(call_target_address, - RelocInfo::CODE_TARGET, - 0, - unoptimized_code); - unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( - unoptimized_code, &rinfo, replacement_code); } @@ -282,8 +268,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x07; // offset Assembler::set_target_address_at(call_target_address, check_code->entry()); - check_code->GetHeap()->incremental_marking()-> - RecordCodeTargetPatch(call_target_address, check_code); } @@ -729,10 +713,7 @@ void Deoptimizer::EntryGenerator::Generate() { Isolate* isolate = masm()->isolate(); - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); - } + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); // Preserve deoptimizer object in register rax and get the input // frame descriptor pointer. __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); @@ -778,11 +759,8 @@ void Deoptimizer::EntryGenerator::Generate() { __ PrepareCallCFunction(2); __ movq(arg1, rax); __ LoadAddress(arg2, ExternalReference::isolate_address()); - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 2); - } + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 2); __ pop(rax); // Replace the current frame with the output frames. diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index b5c5fc5e7..556523fad 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -147,11 +147,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } - // Open a frame scope to indicate that there is a frame on the stack. 
The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done below). - FrameScope frame_scope(masm_, StackFrame::MANUAL); - __ push(rbp); // Caller's frame pointer. __ movq(rbp, rsp); __ push(rsi); // Callee's context. @@ -200,9 +195,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ movq(Operand(rsi, context_offset), rax); - // Update the write barrier. This clobbers rax and rbx. - __ RecordWriteContextSlot( - rsi, context_offset, rax, rbx, kDontSaveFPRegs); + // Update the write barrier. This clobbers all involved + // registers, so we have use a third register to avoid + // clobbering rsi. + __ movq(rcx, rsi); + __ RecordWrite(rcx, context_offset, rax, rbx); } } } @@ -254,7 +251,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -641,11 +638,10 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ movq(location, src); - // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); - __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); + __ RecordWrite(scratch0, offset, src, scratch1); } } @@ -677,7 +673,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - VariableMode mode, + Variable::Mode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -695,7 +691,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ movq(StackOperand(variable), result_register()); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ movq(StackOperand(variable), kScratchRegister); @@ -719,16 +715,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, VisitForAccumulatorValue(function); __ movq(ContextOperand(rsi, variable->index()), result_register()); int offset = Context::SlotOffset(variable->index()); - // We know that we have written a function, which is not a smi. - __ RecordWriteContextSlot(rsi, - offset, - result_register(), - rcx, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + __ movq(rbx, rsi); + __ RecordWrite(rbx, offset, result_register(), rcx); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ movq(ContextOperand(rsi, variable->index()), kScratchRegister); @@ -742,8 +732,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ push(rsi); __ Push(variable->name()); // Declaration nodes are always introduced in one of three modes. - ASSERT(mode == VAR || mode == CONST || mode == LET); - PropertyAttributes attr = (mode == CONST) ? 
READ_ONLY : NONE; + ASSERT(mode == Variable::VAR || + mode == Variable::CONST || + mode == Variable::LET); + PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; __ Push(Smi::FromInt(attr)); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -751,7 +743,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, // must not destroy the current value. if (function != NULL) { VisitForStackValue(function); - } else if (mode == CONST || mode == LET) { + } else if (mode == Variable::CONST || mode == Variable::LET) { __ PushRoot(Heap::kTheHoleValueRootIndex); } else { __ Push(Smi::FromInt(0)); // Indicates no initial value. @@ -1176,21 +1168,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. - if (var->mode() == DYNAMIC_GLOBAL) { + if (var->mode() == Variable::DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == DYNAMIC_LOCAL) { + } else if (var->mode() == Variable::DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ movq(rax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || local->mode() == LET) { + if (local->mode() == Variable::CONST) { __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(not_equal, done); - if (local->mode() == CONST) { - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); - } else { // LET - __ Push(var->name()); - __ CallRuntime(Runtime::kThrowReferenceError, 1); - } + __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); } __ jmp(done); } @@ -1221,7 +1208,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { case Variable::LOCAL: case Variable::CONTEXT: { Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot"); - if (var->mode() != LET && var->mode() != CONST) { + if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { context()->Plug(var); } else { // Let and const need a read barrier. @@ -1229,10 +1216,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { GetVar(rax, var); __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(not_equal, &done, Label::kNear); - if (var->mode() == LET) { + if (var->mode() == Variable::LET) { __ Push(var->name()); __ CallRuntime(Runtime::kThrowReferenceError, 1); - } else { // CONST + } else { // Variable::CONST __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); } __ bind(&done); @@ -1458,23 +1445,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ movq(r8, Operand(rsp, 0)); // Copy of array literal. - __ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset)); + __ movq(rbx, Operand(rsp, 0)); // Copy of array literal. + __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ movq(FieldOperand(rbx, offset), result_register()); - Label no_map_change; - __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store. 
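The let/const handling in EmitVariableLoad above relies on a hole sentinel: those slots are pre-initialized with the hole, and reading it means the binding has not been initialized yet, so let throws a ReferenceError while const yields undefined. An illustrative plain C++ stand-in for that read barrier (the types and names here are invented for the sketch):

#include <optional>
#include <stdexcept>
#include <string>

enum class Mode { VAR, LET, CONST };
struct Slot { std::optional<int> value; };  // an empty slot models the hole

// let in its temporal dead zone throws; const reads before initialization
// yield undefined (modelled here as 0).
int LoadVariable(const Slot& slot, Mode mode, const std::string& name) {
  if ((mode == Mode::LET || mode == Mode::CONST) && !slot.value) {
    if (mode == Mode::LET) throw std::runtime_error("ReferenceError: " + name);
    return 0;  // CONST: load undefined instead of throwing
  }
  return slot.value.value_or(0);
}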
- __ RecordWriteField(rbx, offset, result_register(), rcx, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset)); - __ CheckFastSmiOnlyElements(rdi, &no_map_change, Label::kNear); - __ push(r8); - __ CallRuntime(Runtime::kNonSmiElementStored, 1); - __ bind(&no_map_change); + __ RecordWrite(rbx, offset, result_register(), rcx); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1779,7 +1756,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == LET && op != Token::INIT_LET) { + } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(rax); // Value. @@ -1800,12 +1777,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ movq(location, rax); if (var->IsContextSlot()) { __ movq(rdx, rax); - __ RecordWriteContextSlot( - rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs); + __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx); } } - } else if (var->mode() != CONST) { + } else if (var->mode() != Variable::CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, rcx); @@ -1819,8 +1795,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ movq(location, rax); if (var->IsContextSlot()) { __ movq(rdx, rax); - __ RecordWriteContextSlot( - rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs); + __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx); } } else { ASSERT(var->IsLookupSlot()); @@ -2029,8 +2004,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = - FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); + StrictModeFlag strict_mode = strict_mode_flag(); + if (FLAG_harmony_block_scoping) { + strict_mode = kStrictMode; + } __ Push(Smi::FromInt(strict_mode)); __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP @@ -2072,7 +2049,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2568,24 +2545,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. - // Assume that there are only two callable types, and one of them is at - // either end of the type range for JS object types. Saves extra comparisons. - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax); // Map is now in rax. __ j(below, &null); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - __ j(equal, &function); - - __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - __ j(equal, &function); - // Assume that there is no larger type. 
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); - - // Check if the constructor in the map is a JS function. + + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE); + __ j(above_equal, &function); + + // Check if the constructor in the map is a function. __ movq(rax, FieldOperand(rax, Map::kConstructorOffset)); __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx); __ j(not_equal, &non_function_constructor); @@ -2753,7 +2726,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. __ movq(rdx, rax); - __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs); + __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx); __ bind(&done); context()->Plug(rax); @@ -3037,33 +3010,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ movq(Operand(index_2, 0), object); __ movq(Operand(index_1, 0), temp); - Label no_remembered_set; - __ CheckPageFlag(elements, - temp, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - not_zero, - &no_remembered_set, - Label::kNear); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask.) - - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index_1, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index_2, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - - __ bind(&no_remembered_set); + Label new_space; + __ InNewSpace(elements, temp, equal, &new_space); + + __ movq(object, elements); + __ RecordWriteHelper(object, index_1, temp); + __ RecordWriteHelper(elements, index_2, temp); + __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. 
__ addq(rsp, Immediate(3 * kPointerSize)); __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); @@ -3879,14 +3833,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check) { - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; - context()->PrepareTest(&materialize_true, &materialize_false, - &if_true, &if_false, &fall_through); - + Handle<String> check, + Label* if_true, + Label* if_false, + Label* fall_through) { { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3925,11 +3875,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(rax, if_false); - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx); - __ j(equal, if_true); - __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE); - Split(equal, if_true, if_false, fall_through); + STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx); + Split(above_equal, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(rax, if_false); if (!FLAG_harmony_typeof) { @@ -3947,7 +3895,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } - context()->Plug(if_true, if_false); +} + + +void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, + Label* if_true, + Label* if_false, + Label* fall_through) { + VisitForAccumulatorValue(expr); + PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); + + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); + Split(equal, if_true, if_false, fall_through); } @@ -3955,10 +3914,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr)) return; - // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. Label materialize_true, materialize_false; @@ -3968,6 +3923,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. 
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { + context()->Plug(if_true, if_false); + return; + } + Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4055,9 +4017,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, - Expression* sub_expr, - NilValue nil) { +void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { + Comment cmnt(masm_, "[ CompareToNull"); Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4065,20 +4026,14 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(sub_expr); + VisitForAccumulatorValue(expr->expression()); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ CompareRoot(rax, nil_value); - if (expr->op() == Token::EQ_STRICT) { + __ CompareRoot(rax, Heap::kNullValueRootIndex); + if (expr->is_strict()) { Split(equal, if_true, if_false, fall_through); } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; __ j(equal, if_true); - __ CompareRoot(rax, other_nil_value); + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ j(equal, if_true); __ JumpIfSmi(rax, if_false); // It can be an undetectable object. diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 27a96674c..9d55594dc 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -221,7 +221,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update write barrier. Make sure not to clobber the value. __ movq(scratch0, value); - __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs); + __ RecordWrite(elements, scratch1, scratch0); } @@ -606,40 +606,45 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double; - Label fast_object_with_map_check, fast_object_without_map_check; - Label fast_double_with_map_check, fast_double_without_map_check; + Label slow, slow_with_tagged_index, fast, array, extra; // Check that the object isn't a smi. __ JumpIfSmi(rdx, &slow_with_tagged_index); // Get the map from the receiver. - __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ testb(FieldOperand(r9, Map::kBitFieldOffset), + __ testb(FieldOperand(rbx, Map::kBitFieldOffset), Immediate(1 << Map::kIsAccessCheckNeeded)); __ j(not_zero, &slow_with_tagged_index); // Check that the key is a smi. __ JumpIfNotSmi(rcx, &slow_with_tagged_index); __ SmiToInteger32(rcx, rcx); - __ CmpInstanceType(r9, JS_ARRAY_TYPE); + __ CmpInstanceType(rbx, JS_ARRAY_TYPE); __ j(equal, &array); // Check that the object is some kind of JSObject. 
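VisitCompareToNull above, restored in place of the generalized EmitLiteralCompareNil, implements JavaScript's null comparison: strict equality matches only null, while loose equality also accepts undefined and undetectable objects. A plain C++ stand-in for the decision logic (the Kind enum is illustrative):

#include <cassert>

enum class Kind { kNull, kUndefined, kSmi, kObject, kUndetectableObject };

bool CompareToNull(Kind v, bool is_strict) {
  if (is_strict) return v == Kind::kNull;
  return v == Kind::kNull || v == Kind::kUndefined ||
         v == Kind::kUndetectableObject;
}

int main() {
  assert(CompareToNull(Kind::kNull, true));
  assert(!CompareToNull(Kind::kUndefined, true));
  assert(CompareToNull(Kind::kUndefined, false));
  assert(CompareToNull(Kind::kUndetectableObject, false));
  assert(!CompareToNull(Kind::kSmi, false));
}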
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE); + __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE); __ j(below, &slow); + __ CmpInstanceType(rbx, JS_PROXY_TYPE); + __ j(equal, &slow); + __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE); + __ j(equal, &slow); // Object case: Check key against length in the elements array. // rax: value // rdx: JSObject // rcx: index __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - // Check array bounds. + // Check that the object is in fast mode and writable. + __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), + Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &slow); __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); // rax: value // rbx: FixedArray // rcx: index - __ j(above, &fast_object_with_map_check); + __ j(above, &fast); // Slow case: call runtime. __ bind(&slow); @@ -661,20 +666,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); __ j(below_equal, &slow); // Increment index to get new length. - __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); - __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &check_extra_double); __ leal(rdi, Operand(rcx, 1)); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); - __ jmp(&fast_object_without_map_check); - - __ bind(&check_extra_double); - // rdi: elements array's map - __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); - __ j(not_equal, &slow); - __ leal(rdi, Operand(rcx, 1)); - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); - __ jmp(&fast_double_without_map_check); + __ jmp(&fast); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it @@ -684,6 +678,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // rdx: receiver (a JSArray) // rcx: index __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), + Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &slow); // Check the key against the length in the array, compute the // address to store into and fall through to fast case. @@ -691,45 +688,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ j(below_equal, &extra); // Fast case: Do the store. - __ bind(&fast_object_with_map_check); + __ bind(&fast); // rax: value // rbx: receiver's elements array (a FixedArray) // rcx: index - // rdx: receiver (a JSArray) - __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); - __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &fast_double_with_map_check); - __ bind(&fast_object_without_map_check); - // Smi stores don't require further checks. Label non_smi_value; - __ JumpIfNotSmi(rax, &non_smi_value); - // It's irrelevant whether array is smi-only or not when writing a smi. __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize), rax); + __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear); __ ret(0); - __ bind(&non_smi_value); - // Writing a non-smi, check whether array allows non-smi elements. - // r9: receiver's map - __ CheckFastObjectElements(r9, &slow, Label::kNear); - __ lea(rcx, - FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize)); - __ movq(Operand(rcx, 0), rax); + // Slow case that needs to retain rcx for use by RecordWrite. + // Update write barrier for the elements array address. 
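The KeyedStoreIC::GenerateGeneric hunks above distinguish an in-bounds fast store, an "extra" store that grows a JSArray by one element when the index equals the current length and the backing store still has capacity, and a slow path into the runtime. A simplified stand-in for that dispatch, with plain C++ types standing in for the JSArray and its FixedArray backing store:

#include <cstddef>
#include <vector>

struct FastArray {
  std::vector<int> elements;  // backing FixedArray (capacity)
  size_t length = 0;          // JSArray length, always <= elements.size()
};

// Returns false where the generic stub would bail out to the runtime.
bool KeyedStoreFast(FastArray& a, size_t index, int value) {
  if (index < a.length) {                           // fast: in-bounds store
    a.elements[index] = value;
    return true;
  }
  if (index == a.length && index < a.elements.size()) {
    a.elements[index] = value;                      // "extra": bump the length
    a.length = index + 1;
    return true;
  }
  return false;                                     // out of capacity: go slow
}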
__ movq(rdx, rax); - __ RecordWrite( - rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ ret(0); - - __ bind(&fast_double_with_map_check); - // Check for fast double array case. If this fails, call through to the - // runtime. - // rdi: elements array's map - __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); - __ j(not_equal, &slow); - __ bind(&fast_double_without_map_check); - // If the value is a number, store it as a double in the FastDoubleElements - // array. - __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow); + __ RecordWriteNonSmi(rbx, 0, rdx, rcx); __ ret(0); } @@ -874,22 +846,21 @@ static void GenerateCallMiss(MacroAssembler* masm, __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // Enter an internal frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); - // Push the receiver and the name of the function. - __ push(rdx); - __ push(rcx); + // Push the receiver and the name of the function. + __ push(rdx); + __ push(rcx); - // Call the entry. - CEntryStub stub(1); - __ Set(rax, 2); - __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate())); - __ CallStub(&stub); + // Call the entry. + CEntryStub stub(1); + __ Set(rax, 2); + __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate())); + __ CallStub(&stub); - // Move result to rdi and exit the internal frame. - __ movq(rdi, rax); - } + // Move result to rdi and exit the internal frame. + __ movq(rdi, rax); + __ LeaveInternalFrame(); // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -1031,14 +1002,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(rcx); // save the key - __ push(rdx); // pass the receiver - __ push(rcx); // pass the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(rcx); // restore the key - } + __ EnterInternalFrame(); + __ push(rcx); // save the key + __ push(rdx); // pass the receiver + __ push(rcx); // pass the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(rcx); // restore the key + __ LeaveInternalFrame(); __ movq(rdi, rax); __ jmp(&do_call); @@ -1242,12 +1212,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ movq(mapped_location, rax); __ lea(r9, mapped_location); __ movq(r8, rax); - __ RecordWrite(rbx, - r9, - r8, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - INLINE_SMI_CHECK); + __ RecordWrite(rbx, r9, r8); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in rbx. 
@@ -1256,12 +1221,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ movq(unmapped_location, rax); __ lea(r9, unmapped_location); __ movq(r8, rax); - __ RecordWrite(rbx, - r9, - r8, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - INLINE_SMI_CHECK); + __ RecordWrite(rbx, r9, r8); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 45aaad754..9064a266e 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -81,12 +81,6 @@ bool LCodeGen::GenerateCode() { HPhase phase("Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - - // Open a frame scope to indicate that there is a frame on the stack. The - // MANUAL indicates that the scope shouldn't actually generate code to set up - // the frame (that is done in GeneratePrologue). - FrameScope frame_scope(masm_, StackFrame::MANUAL); - return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -223,8 +217,11 @@ bool LCodeGen::GeneratePrologue() { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ movq(Operand(rsi, context_offset), rax); - // Update the write barrier. This clobbers rax and rbx. - __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); + // Update the write barrier. This clobbers all involved + // registers, so we have use a third register to avoid + // clobbering rsi. + __ movq(rcx, rsi); + __ RecordWrite(rcx, context_offset, rax, rbx); } } Comment(";;; End allocate local context"); @@ -283,9 +280,6 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -673,7 +667,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(kind == expected_safepoint_kind_); - const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); + const ZoneList<LOperand*>* operands = pointers->operands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); @@ -1583,33 +1577,30 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { +void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { Register reg = ToRegister(instr->InputAt(0)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); - // If the expression is known to be untagged or a smi, then it's definitely - // not null, and it can't be a an undetectable object. if (instr->hydrogen()->representation().IsSpecialization() || instr->hydrogen()->type().IsSmi()) { + // If the expression is known to untagged or smi, then it's definitely + // not null, and it can't be a an undetectable object. + // Jump directly to the false block. EmitGoto(false_block); return; } int true_block = chunk_->LookupDestination(instr->true_block_id()); - Heap::RootListIndex nil_value = instr->nil() == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ CompareRoot(reg, nil_value); - if (instr->kind() == kStrictEquality) { + + __ CompareRoot(reg, Heap::kNullValueRootIndex); + if (instr->is_strict()) { EmitBranch(true_block, false_block, equal); } else { - Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? 
- Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ j(equal, true_label); - __ CompareRoot(reg, other_nil_value); + __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); __ j(equal, true_label); __ JumpIfSmi(reg, false_label); // Check for undetectable objects by looking in the bit field in @@ -1761,40 +1752,30 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, Handle<String> class_name, Register input, - Register temp, - Register scratch) { + Register temp) { __ JumpIfSmi(input, is_false); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + // Map is now in temp. + // Functions have class 'Function'. + __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - // Assuming the following assertions, we can use the same compares to test - // for both being a function type and being in the object type range. - STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); - STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == - FIRST_SPEC_OBJECT_TYPE + 1); - STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == - LAST_SPEC_OBJECT_TYPE - 1); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); - __ j(below, is_false); - __ j(equal, is_true); - __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); - __ j(equal, is_true); + __ j(above_equal, is_true); } else { - // Faster code path to avoid two compares: subtract lower bound from the - // actual type and do a signed compare with the width of the type range. - __ movq(temp, FieldOperand(input, HeapObject::kMapOffset)); - __ movq(scratch, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ subb(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); - __ cmpb(scratch, - Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE))); - __ j(above, is_false); + __ j(above_equal, is_false); } - // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ movq(temp, FieldOperand(temp, Map::kConstructorOffset)); + // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and + // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after + // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. + STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == + LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); + // Objects with a non-function constructor have class 'Object'. 
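The block removed in the EmitClassOfTest hunk is the "faster code path to avoid two compares": subtract the lower bound of the instance-type range, then do a single unsigned comparison against the width of the range, so one branch covers both the below-range and above-range cases. The same trick in standalone C++, with made-up bounds standing in for FIRST_/LAST_NONCALLABLE_SPEC_OBJECT_TYPE:

#include <cstdint>
#include <cstdio>

// Hypothetical instance-type range for illustration only.
constexpr uint8_t kFirst = 0xA0;
constexpr uint8_t kLast  = 0xBF;

// lo <= x && x <= hi with one compare: after subtracting lo, any x below the
// range wraps around to a large unsigned value, so a single "above" test
// rejects both out-of-range sides at once (the subb/cmpb/j(above) sequence).
bool InRange(uint8_t x) {
  return static_cast<uint8_t>(x - kFirst) <= (kLast - kFirst);
}

int main() {
  printf("%d %d %d\n", InRange(0x9F), InRange(0xA0), InRange(0xC0));  // 0 1 0
}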
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -1823,7 +1804,6 @@ void LCodeGen::EmitClassOfTest(Label* is_true, void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Register input = ToRegister(instr->InputAt(0)); Register temp = ToRegister(instr->TempAt(0)); - Register temp2 = ToRegister(instr->TempAt(1)); Handle<String> class_name = instr->hydrogen()->class_name(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -1832,7 +1812,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); - EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); + EmitClassOfTest(true_label, false_label, class_name, input, temp); EmitBranch(true_block, false_block, equal); } @@ -1873,8 +1853,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - virtual LInstruction* instr() { return instr_; } + Label* map_check() { return &map_check_; } + private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -2015,7 +1996,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL); __ movq(result, Operand(result, 0)); } - if (instr->hydrogen()->RequiresHoleCheck()) { + if (instr->hydrogen()->check_hole_value()) { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); } @@ -2035,39 +2016,25 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { - Register object = ToRegister(instr->TempAt(0)); - Register address = ToRegister(instr->TempAt(1)); Register value = ToRegister(instr->InputAt(0)); - ASSERT(!value.is(object)); - Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell()); - - __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL); - + Register temp = ToRegister(instr->TempAt(0)); + ASSERT(!value.is(temp)); + bool check_hole = instr->hydrogen()->check_hole_value(); + if (!check_hole && value.is(rax)) { + __ store_rax(instr->hydrogen()->cell().location(), + RelocInfo::GLOBAL_PROPERTY_CELL); + return; + } // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. We deoptimize in that case. - if (instr->hydrogen()->RequiresHoleCheck()) { - __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex); + __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL); + if (check_hole) { + __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); } - - // Store the value. - __ movq(Operand(address, 0), value); - - Label smi_store; - __ JumpIfSmi(value, &smi_store, Label::kNear); - - int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag; - __ lea(object, Operand(address, -offset)); - // Cells are always in the remembered set. 
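Both sides of the DoStoreGlobalCell hunk keep the same guard: before writing through the property cell, compare its current contents against the "hole" sentinel and bail out of optimized code if it matches, since a holed cell means the global may have been deleted. A sketch of that guard; the sentinel value and the boolean bail-out are purely illustrative:

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for Heap::kTheHoleValue.
constexpr uintptr_t kTheHole = ~uintptr_t{0};

// Returns false ("deoptimize") instead of storing when the cell is holed,
// mirroring CompareRoot(..., kTheHoleValueRootIndex) + DeoptimizeIf(equal).
bool StoreGlobalCell(uintptr_t* cell, uintptr_t value, bool requires_hole_check) {
  if (requires_hole_check && *cell == kTheHole) return false;
  *cell = value;
  return true;
}

int main() {
  uintptr_t cell = kTheHole;
  printf("%d\n", StoreGlobalCell(&cell, 42, true));  // 0: bails out
  cell = 0;
  printf("%d\n", StoreGlobalCell(&cell, 42, true));  // 1: stores
}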
- __ RecordWrite(object, - address, - value, - kSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ bind(&smi_store); + __ movq(Operand(temp, 0), value); } @@ -2097,7 +2064,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); Register scratch = ToRegister(instr->TempAt(0)); - __ RecordWriteContextSlot(context, offset, value, scratch, kSaveFPRegs); + __ RecordWrite(context, offset, value, scratch); } } @@ -2316,15 +2283,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result(ToDoubleRegister(instr->result())); - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - FAST_DOUBLE_ELEMENTS, - offset); - __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + offset); + __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); + } Operand double_load_operand = BuildFastArrayOperand( instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, @@ -2396,7 +2365,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -2713,7 +2681,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } - virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3010,7 +2977,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(rax)); int arity = instr->arity(); - CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); + CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); __ Drop(1); @@ -3066,7 +3033,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); // Update the write barrier for the object for in-object properties. - __ RecordWriteField(object, offset, value, temp, kSaveFPRegs); + __ RecordWrite(object, offset, value, temp); } } else { Register temp = ToRegister(instr->TempAt(0)); @@ -3075,7 +3042,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. // object is used as a scratch register. 
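The DoLoadKeyedFastDoubleElement hunk guards loads from a FastDoubleElements backing store by comparing the upper 32 bits of the slot against kHoleNanUpper32: holes in a double array are encoded as one specific NaN bit pattern, and only its high word needs inspecting. A self-contained sketch of that encoding; the bit pattern below is invented for illustration and is not V8's constant:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Illustrative hole encoding: a quiet NaN with a payload reserved to mean
// "this element is a hole" (not the real kHoleNanUpper32/kHoleNanLower32).
constexpr uint64_t kHoleBits = 0x7FF8'DEAD'BEEF'0000ULL;

double HoleValue() {
  double d;
  std::memcpy(&d, &kHoleBits, sizeof d);
  return d;
}

// Check only the upper 32 bits, as the generated cmpl does.
bool IsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == static_cast<uint32_t>(kHoleBits >> 32);
}

int main() {
  printf("%d %d\n", IsHole(HoleValue()), IsHole(1.5));  // 1 0
}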
- __ RecordWriteField(temp, offset, value, object, kSaveFPRegs); + __ RecordWrite(temp, offset, value, object); } } } @@ -3123,7 +3090,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3159,13 +3125,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - // conversion, so it deopts in that case. - if (instr->hydrogen()->ValueNeedsSmiCheck()) { - Condition cc = masm()->CheckSmi(value); - DeoptimizeIf(NegateCondition(cc), instr->environment()); - } - // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3187,7 +3146,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { key, times_pointer_size, FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value, kSaveFPRegs); + __ RecordWrite(elements, key, value); } } @@ -3237,7 +3196,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3358,7 +3316,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3435,7 +3392,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } - virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3531,6 +3487,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } +class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + private: + LTaggedToI* instr_; +}; + + void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Label done, heap_number; Register input_reg = ToRegister(instr->InputAt(0)); @@ -3579,16 +3545,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { - class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LTaggedToI* instr_; - }; - LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -4025,12 +3981,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = not_zero; } else if (type_name->Equals(heap()->function_symbol())) { - 
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CmpObjectType(input, JS_FUNCTION_TYPE, input); - __ j(equal, true_label); - __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); - final_branch_condition = equal; + __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input); + final_branch_condition = above_equal; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4156,7 +4109,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } - virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 106d7bb2e..8cb4cece9 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -140,8 +140,7 @@ class LCodeGen BASE_EMBEDDED { Label* if_false, Handle<String> class_name, Register input, - Register temporary, - Register scratch); + Register temporary); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } int GetParameterCount() const { return scope()->num_parameters(); } @@ -346,20 +345,16 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), - external_exit_(NULL), - instruction_index_(codegen->current_instruction_) { + : codegen_(codegen), external_exit_(NULL) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; - virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } - int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -370,7 +365,6 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; - int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index a67a59320..5fc56462b 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -214,11 +214,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNilAndBranch::PrintDataTo(StringStream* stream) { +void LIsNullAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(kind() == kStrictEquality ? " === " : " == "); - stream->Add(nil() == kNullValue ? "null" : "undefined"); + stream->Add(is_strict() ? 
" === null" : " == null"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -707,9 +706,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - int argument_index_accumulator = 0; - instr->set_environment(CreateEnvironment(hydrogen_env, - &argument_index_accumulator)); + instr->set_environment(CreateEnvironment(hydrogen_env)); return instr; } @@ -992,13 +989,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment( - HEnvironment* hydrogen_env, - int* argument_index_accumulator) { +LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = - CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); + LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1008,6 +1002,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( argument_count_, value_count, outer); + int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,7 +1011,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument((*argument_index_accumulator)++); + op = new LArgument(argument_index++); } else { op = UseAny(value); } @@ -1441,10 +1436,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister(); - return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp); + LOperand* temp = instr->is_strict() ? NULL : TempRegister(); + return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp); } @@ -1494,7 +1489,6 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { return new LClassOfTestAndBranch(UseTempRegister(instr->value()), - TempRegister(), TempRegister()); } @@ -1722,7 +1716,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->RequiresHoleCheck() + return instr->check_hole_value() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1737,10 +1731,8 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LStoreGlobalCell* result = - new LStoreGlobalCell(UseTempRegister(instr->value()), - TempRegister(), - TempRegister()); - return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; + new LStoreGlobalCell(UseRegister(instr->value()), TempRegister()); + return instr->check_hole_value() ? 
AssignEnvironment(result) : result; } diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index d43a86a9a..d169bf6df 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNilAndBranch) \ + V(IsNullAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -609,18 +609,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNilAndBranch: public LControlInstruction<1, 1> { +class LIsNullAndBranch: public LControlInstruction<1, 1> { public: - LIsNilAndBranch(LOperand* value, LOperand* temp) { + LIsNullAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) - EqualityKind kind() const { return hydrogen()->kind(); } - NilValue nil() const { return hydrogen()->nil(); } + bool is_strict() const { return hydrogen()->is_strict(); } virtual void PrintDataTo(StringStream* stream); }; @@ -706,12 +705,11 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { }; -class LClassOfTestAndBranch: public LControlInstruction<1, 2> { +class LClassOfTestAndBranch: public LControlInstruction<1, 1> { public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { + LClassOfTestAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; - temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, @@ -1199,12 +1197,11 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> { }; -class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> { +class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> { public: - explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) { + explicit LStoreGlobalCell(LOperand* value, LOperand* temp) { inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; + temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") @@ -2149,8 +2146,7 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, - int* argument_index_accumulator); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 7fe6d5821..9cfc9b658 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -44,7 +44,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), allow_stub_calls_(true), - has_frame_(false), root_array_available_(true) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), @@ -197,47 +196,28 @@ void MacroAssembler::CompareRoot(const Operand& with, } -void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. 
- Register addr, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetFinalAction and_then) { - if (FLAG_debug_code) { - Label ok; - JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); - int3(); - bind(&ok); - } - // Load store buffer top. - LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); - // Store pointer to buffer. - movq(Operand(scratch, 0), addr); - // Increment buffer top. - addq(scratch, Immediate(kPointerSize)); - // Write back new top of buffer. - StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); - // Call stub on end of buffer. - Label done; - // Check for end of buffer. - testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); - if (and_then == kReturnAtEnd) { - Label buffer_overflowed; - j(not_equal, &buffer_overflowed, Label::kNear); - ret(0); - bind(&buffer_overflowed); - } else { - ASSERT(and_then == kFallThroughAtEnd); - j(equal, &done, Label::kNear); - } - StoreBufferOverflowStub store_buffer_overflow = - StoreBufferOverflowStub(save_fp); - CallStub(&store_buffer_overflow); - if (and_then == kReturnAtEnd) { - ret(0); - } else { - ASSERT(and_then == kFallThroughAtEnd); - bind(&done); +void MacroAssembler::RecordWriteHelper(Register object, + Register addr, + Register scratch) { + if (emit_debug_code()) { + // Check that the object is not in new space. + Label not_in_new_space; + InNewSpace(object, scratch, not_equal, ¬_in_new_space, Label::kNear); + Abort("new-space object passed to RecordWriteHelper"); + bind(¬_in_new_space); } + + // Compute the page start address from the heap object pointer, and reuse + // the 'object' register for it. + and_(object, Immediate(~Page::kPageAlignmentMask)); + + // Compute number of region covering addr. See Page::GetRegionNumberForAddress + // method for more details. + shrl(addr, Immediate(Page::kRegionSizeLog2)); + andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2)); + + // Set dirty mark for region. + bts(Operand(object, Page::kDirtyFlagOffset), addr); } @@ -245,7 +225,7 @@ void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc, Label* branch, - Label::Distance distance) { + Label::Distance near_jump) { if (Serializer::enabled()) { // Can't do arithmetic on external references if it might get serialized. // The mask isn't really an address. We load it as an external reference in @@ -260,7 +240,7 @@ void MacroAssembler::InNewSpace(Register object, } movq(kScratchRegister, ExternalReference::new_space_start(isolate())); cmpq(scratch, kScratchRegister); - j(cc, branch, distance); + j(cc, branch, near_jump); } else { ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); intptr_t new_space_start = @@ -272,128 +252,127 @@ void MacroAssembler::InNewSpace(Register object, lea(scratch, Operand(object, kScratchRegister, times_1, 0)); } and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); - j(cc, branch, distance); + j(cc, branch, near_jump); } } -void MacroAssembler::RecordWriteField( - Register object, - int offset, - Register value, - Register dst, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { +void MacroAssembler::RecordWrite(Register object, + int offset, + Register value, + Register index) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are rsi. - ASSERT(!value.is(rsi) && !dst.is(rsi)); + ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi)); // First, check if a write barrier is even needed. 
The tests below - // catch stores of Smis. + // catch stores of smis and stores into the young generation. Label done; + JumpIfSmi(value, &done); - // Skip barrier if writing a smi. - if (smi_check == INLINE_SMI_CHECK) { - JumpIfSmi(value, &done); - } - - // Although the object register is tagged, the offset is relative to the start - // of the object, so so offset must be a multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize)); - - lea(dst, FieldOperand(object, offset)); - if (emit_debug_code()) { - Label ok; - testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); - j(zero, &ok, Label::kNear); - int3(); - bind(&ok); - } - - RecordWrite( - object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); - + RecordWriteNonSmi(object, offset, value, index); bind(&done); - // Clobber clobbered input registers when running with the debug-code flag - // turned on to provoke errors. + // Clobber all input registers when running with the debug-code flag + // turned on to provoke errors. This clobbering repeats the + // clobbering done inside RecordWriteNonSmi but it's necessary to + // avoid having the fast case for smis leave the registers + // unchanged. if (emit_debug_code()) { + movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); } } void MacroAssembler::RecordWrite(Register object, Register address, - Register value, - SaveFPRegsMode fp_mode, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { + Register value) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are rsi. - ASSERT(!value.is(rsi) && !address.is(rsi)); + ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi)); - ASSERT(!object.is(value)); - ASSERT(!object.is(address)); - ASSERT(!value.is(address)); - if (emit_debug_code()) { - AbortIfSmi(object); - } + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. + Label done; + JumpIfSmi(value, &done); - if (remembered_set_action == OMIT_REMEMBERED_SET && - !FLAG_incremental_marking) { - return; - } + InNewSpace(object, value, equal, &done); - if (FLAG_debug_code) { - Label ok; - cmpq(value, Operand(address, 0)); - j(equal, &ok, Label::kNear); - int3(); - bind(&ok); + RecordWriteHelper(object, address, value); + + bind(&done); + + // Clobber all input registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); } +} - // First, check if a write barrier is even needed. The tests below - // catch stores of smis and stores into the young generation. + +void MacroAssembler::RecordWriteNonSmi(Register object, + int offset, + Register scratch, + Register index) { Label done; - if (smi_check == INLINE_SMI_CHECK) { - // Skip barrier if writing a smi. - JumpIfSmi(value, &done); + if (emit_debug_code()) { + Label okay; + JumpIfNotSmi(object, &okay, Label::kNear); + Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); + bind(&okay); + + if (offset == 0) { + // index must be int32. + Register tmp = index.is(rax) ? 
rbx : rax; + push(tmp); + movl(tmp, index); + cmpq(tmp, index); + Check(equal, "Index register for RecordWrite must be untagged int32."); + pop(tmp); + } } - CheckPageFlag(value, - value, // Used as scratch. - MemoryChunk::kPointersToHereAreInterestingMask, - zero, - &done, - Label::kNear); + // Test that the object address is not in the new space. We cannot + // update page dirty marks for new space pages. + InNewSpace(object, scratch, equal, &done); - CheckPageFlag(object, - value, // Used as scratch. - MemoryChunk::kPointersFromHereAreInterestingMask, - zero, - &done, - Label::kNear); + // The offset is relative to a tagged or untagged HeapObject pointer, + // so either offset or offset + kHeapObjectTag must be a + // multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize) || + IsAligned(offset + kHeapObjectTag, kPointerSize)); - RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); - CallStub(&stub); + Register dst = index; + if (offset != 0) { + lea(dst, Operand(object, offset)); + } else { + // array access: calculate the destination address in the same manner as + // KeyedStoreIC::GenerateGeneric. + lea(dst, FieldOperand(object, + index, + times_pointer_size, + FixedArray::kHeaderSize)); + } + RecordWriteHelper(object, dst, scratch); bind(&done); - // Clobber clobbered registers when running with the debug-code flag + // Clobber all input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); } } - void MacroAssembler::Assert(Condition cc, const char* msg) { if (emit_debug_code()) Check(cc, msg); } @@ -421,7 +400,7 @@ void MacroAssembler::Check(Condition cc, const char* msg) { Label L; j(cc, &L, Label::kNear); Abort(msg); - // Control will not return here. + // will not return here bind(&L); } @@ -469,6 +448,9 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif + // Disable stub call restrictions to always allow calls to abort. + AllowStubCallsScope allow_scope(this, true); + push(rax); movq(kScratchRegister, p0, RelocInfo::NONE); push(kScratchRegister); @@ -476,28 +458,20 @@ void MacroAssembler::Abort(const char* msg) { reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), RelocInfo::NONE); push(kScratchRegister); - - if (!has_frame_) { - // We don't actually want to generate a pile of code for this, so just - // claim there is a stack frame, without generating one. - FrameScope scope(this, StackFrame::NONE); - CallRuntime(Runtime::kAbort, 2); - } else { - CallRuntime(Runtime::kAbort, 2); - } - // Control will not return here. + CallRuntime(Runtime::kAbort, 2); + // will not return here int3(); } void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) { - ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs + ASSERT(allow_stub_calls()); // calls are not allowed in some stubs Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { - ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. + ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. 
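The restored RecordWriteHelper earlier in this file implements the older, region-based barrier: mask the address down to its page, compute which fixed-size region of that page the written slot falls into, and set that region's dirty bit in the page header (the and_/shrl/andl/bts sequence). A compact sketch of the same address arithmetic, with page and region sizes chosen only for illustration:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize       = 1u << 20;      // illustrative 1 MiB pages
constexpr uintptr_t kPageAlignMask  = kPageSize - 1;
constexpr unsigned  kRegionSizeLog2 = 10;            // illustrative 1 KiB regions

struct Page {
  uint64_t dirty_regions[(kPageSize >> kRegionSizeLog2) >> 6];  // 1 bit per region
};

// Mark the region containing `slot` dirty, like the bts instruction does.
// V8 finds the header by masking the slot down to the page start; a sketch
// has to pass the header in instead.
void MarkRegionDirty(Page* page_header, uintptr_t slot) {
  uintptr_t region = (slot & kPageAlignMask) >> kRegionSizeLog2;  // shrl + andl
  page_header->dirty_regions[region >> 6] |= uint64_t{1} << (region & 63);
}

int main() {
  static Page page{};                  // pretend this sits at the page start
  uintptr_t slot = 0x12345678;         // some slot inside that page
  MarkRegionDirty(&page, slot);
  uintptr_t region = (slot & kPageAlignMask) >> kRegionSizeLog2;
  printf("region %llu dirty: %llu\n", (unsigned long long)region,
         (unsigned long long)((page.dirty_regions[region >> 6] >> (region & 63)) & 1));
}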
MaybeObject* result = stub->TryGetCode(); if (!result->IsFailure()) { call(Handle<Code>(Code::cast(result->ToObjectUnchecked())), @@ -508,12 +482,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); + ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) { + ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. MaybeObject* result = stub->TryGetCode(); if (!result->IsFailure()) { jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())), @@ -529,12 +504,6 @@ void MacroAssembler::StubReturn(int argc) { } -bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { - if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); -} - - void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { addq(rsp, Immediate(num_arguments * kPointerSize)); @@ -571,7 +540,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); Set(rax, function->nargs); LoadAddress(rbx, ExternalReference(function, isolate())); - CEntryStub ces(1, kSaveFPRegs); + CEntryStub ces(1); + ces.SaveDoubles(); CallStub(&ces); } @@ -825,8 +795,8 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // You can't call a builtin without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); + // Calls are not allowed in some stubs. + ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. Fake a @@ -855,57 +825,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } -static const Register saved_regs[] = - { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 }; -static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register); - - -void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, - Register exclusion1, - Register exclusion2, - Register exclusion3) { - // We don't allow a GC during a store buffer overflow so there is no need to - // store the registers in any particular way, but we do have to store and - // restore them. - for (int i = 0; i < kNumberOfSavedRegs; i++) { - Register reg = saved_regs[i]; - if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { - push(reg); - } - } - // R12 to r15 are callee save on all platforms. 
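PushCallerSaved/PopCallerSaved, deleted above, simply walk a fixed list of caller-saved registers, skip up to three registers the caller promises not to need preserved, and push/pop the rest in mirror order. The structure is easy to see in a sketch; the register names below are just labels, not a real calling convention:

#include <array>
#include <string>
#include <vector>
#include <cstdio>

const std::array<std::string, 5> kSavedRegs = {"rax", "rcx", "rdx", "r8", "r9"};
std::vector<std::string> stack;  // stand-in for the machine stack

bool Excluded(const std::string& r, const std::vector<std::string>& ex) {
  for (const auto& e : ex) if (r == e) return true;
  return false;
}

// Push every caller-saved register except the exclusions.
void PushCallerSaved(const std::vector<std::string>& exclusions) {
  for (const auto& r : kSavedRegs)
    if (!Excluded(r, exclusions)) stack.push_back(r);
}

// Walk the same list backwards so the pops mirror the pushes exactly.
void PopCallerSaved(const std::vector<std::string>& exclusions) {
  for (auto it = kSavedRegs.rbegin(); it != kSavedRegs.rend(); ++it)
    if (!Excluded(*it, exclusions)) stack.pop_back();
}

int main() {
  PushCallerSaved({"rcx"});                       // rcx holds the result, don't preserve it
  printf("saved %zu registers\n", stack.size());  // 4 of the 5
  PopCallerSaved({"rcx"});
  printf("stack now %zu\n", stack.size());        // 0
}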
- if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); - subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { - XMMRegister reg = XMMRegister::from_code(i); - movsd(Operand(rsp, i * kDoubleSize), reg); - } - } -} - - -void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, - Register exclusion1, - Register exclusion2, - Register exclusion3) { - if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { - XMMRegister reg = XMMRegister::from_code(i); - movsd(reg, Operand(rsp, i * kDoubleSize)); - } - addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); - } - for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { - Register reg = saved_regs[i]; - if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { - pop(reg); - } - } -} - - void MacroAssembler::Set(Register dst, int64_t x) { if (x == 0) { xorl(dst, dst); @@ -2648,91 +2567,13 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastElementValue)); - j(above, fail, distance); -} - - -void MacroAssembler::CheckFastObjectElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); - j(below_equal, fail, distance); + STATIC_ASSERT(FAST_ELEMENTS == 0); cmpb(FieldOperand(map, Map::kBitField2Offset), Immediate(Map::kMaximumBitField2FastElementValue)); j(above, fail, distance); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); - j(above, fail, distance); -} - - -void MacroAssembler::StoreNumberToDoubleElements( - Register maybe_number, - Register elements, - Register key, - XMMRegister xmm_scratch, - Label* fail) { - Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done; - - JumpIfSmi(maybe_number, &smi_value, Label::kNear); - - CheckMap(maybe_number, - isolate()->factory()->heap_number_map(), - fail, - DONT_DO_SMI_CHECK); - - // Double value, canonicalize NaN. - uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); - cmpl(FieldOperand(maybe_number, offset), - Immediate(kNaNOrInfinityLowerBoundUpper32)); - j(greater_equal, &maybe_nan, Label::kNear); - - bind(¬_nan); - movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset)); - bind(&have_double_value); - movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize), - xmm_scratch); - jmp(&done); - - bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - j(greater, &is_nan, Label::kNear); - cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); - j(zero, ¬_nan); - bind(&is_nan); - // Convert all NaNs to the same canonical NaN value when they are stored in - // the double array. 
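StoreNumberToDoubleElements, removed in this hunk, canonicalizes every NaN to a single bit pattern before it is written into a FastDoubleElements array; otherwise an arbitrary NaN payload could alias whatever special NaN pattern the array reserves for holes (the kHoleNanUpper32 check seen earlier in this diff). A sketch of the store-side canonicalization; the backing store here is an ordinary vector rather than a real FixedDoubleArray:

#include <cmath>
#include <limits>
#include <vector>
#include <cstdio>

// Store a number into a doubles-only backing store. Any NaN is first replaced
// by one canonical quiet NaN so no stored value can collide with a reserved
// hole pattern.
void StoreDoubleElement(std::vector<double>& elements, size_t index, double value) {
  if (std::isnan(value)) value = std::numeric_limits<double>::quiet_NaN();
  elements[index] = value;
}

int main() {
  std::vector<double> elements(4, 0.0);
  StoreDoubleElement(elements, 0, std::nan("0x42"));  // NaN with an arbitrary payload
  StoreDoubleElement(elements, 1, 1.25);
  printf("%f %f\n", elements[0], elements[1]);        // nan 1.250000
}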
- Set(kScratchRegister, BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double())); - movq(xmm_scratch, kScratchRegister); - jmp(&have_double_value, Label::kNear); - - bind(&smi_value); - // Value is a smi. convert to a double and store. - // Preserve original value. - SmiToInteger32(kScratchRegister, maybe_number); - cvtlsi2sd(xmm_scratch, kScratchRegister); - movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize), - xmm_scratch); - bind(&done); -} - - void MacroAssembler::CheckMap(Register obj, Handle<Map> map, Label* fail, @@ -2946,10 +2787,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { + ASSERT(allow_stub_calls()); Set(rax, 0); // No arguments. LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); CEntryStub ces(1); - ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif // ENABLE_DEBUGGER_SUPPORT @@ -2975,9 +2816,6 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; InvokePrologue(expected, actual, @@ -3009,9 +2847,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - Label done; Register dummy = rax; InvokePrologue(expected, @@ -3042,9 +2877,6 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function.is(rdi)); movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); @@ -3064,9 +2896,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { - // You can't call a function without a valid frame. - ASSERT(flag == JUMP_FUNCTION || has_frame()); - ASSERT(function->is_compiled()); // Get the function and setup the context. Move(rdi, Handle<JSFunction>(function)); @@ -3930,20 +3759,6 @@ void MacroAssembler::CopyBytes(Register destination, } -void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler) { - Label loop, entry; - jmp(&entry); - bind(&loop); - movq(Operand(start_offset, 0), filler); - addq(start_offset, Immediate(kPointerSize)); - bind(&entry); - cmpq(start_offset, end_offset); - j(less, &loop); -} - - void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { // Move up the chain of contexts to the context containing the slot. @@ -4043,7 +3858,6 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, int num_arguments) { - ASSERT(has_frame()); // Check stack alignment. 
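InitializeFieldsWithFiller, dropped from the macro assembler here, is just a word-by-word fill loop: starting at start_offset and stopping at end_offset, write the filler value into each pointer-sized slot. Its C++ equivalent is a few lines; the filler value below is arbitrary:

#include <cstdint>
#include <cstdio>

// Overwrite every pointer-sized field in [start, end) with `filler`,
// mirroring the movq/addq/cmpq loop in the removed helper.
void InitializeFieldsWithFiller(uintptr_t* start, uintptr_t* end, uintptr_t filler) {
  for (uintptr_t* p = start; p < end; ++p) *p = filler;
}

int main() {
  uintptr_t object[8];
  InitializeFieldsWithFiller(object, object + 8, 0xDEADBEEF);  // illustrative filler word
  printf("%lx\n", (unsigned long)object[3]);                   // deadbeef
}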
if (emit_debug_code()) { CheckStackAlignment(); @@ -4058,17 +3872,6 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) { } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; -} - - CodePatcher::CodePatcher(byte* address, int size) : address_(address), size_(size), @@ -4089,195 +3892,6 @@ CodePatcher::~CodePatcher() { ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } - -void MacroAssembler::CheckPageFlag( - Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met, - Label::Distance condition_met_distance) { - ASSERT(cc == zero || cc == not_zero); - if (scratch.is(object)) { - and_(scratch, Immediate(~Page::kPageAlignmentMask)); - } else { - movq(scratch, Immediate(~Page::kPageAlignmentMask)); - and_(scratch, object); - } - if (mask < (1 << kBitsPerByte)) { - testb(Operand(scratch, MemoryChunk::kFlagsOffset), - Immediate(static_cast<uint8_t>(mask))); - } else { - testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); - } - j(cc, condition_met, condition_met_distance); -} - - -void MacroAssembler::JumpIfBlack(Register object, - Register bitmap_scratch, - Register mask_scratch, - Label* on_black, - Label::Distance on_black_distance) { - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); - GetMarkBits(object, bitmap_scratch, mask_scratch); - - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - // The mask_scratch register contains a 1 at the position of the first bit - // and a 0 at all other positions, including the position of the second bit. - movq(rcx, mask_scratch); - // Make rcx into a mask that covers both marking bits using the operation - // rcx = mask | (mask << 1). - lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); - // Note that we are using a 4-byte aligned 8-byte load. - and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); - cmpq(mask_scratch, rcx); - j(equal, on_black, on_black_distance); -} - - -// Detect some, but not all, common pointer-free objects. This is used by the -// incremental write barrier which doesn't care about oddballs (they are always -// marked black immediately so this code is not hit). -void MacroAssembler::JumpIfDataObject( - Register value, - Register scratch, - Label* not_data_object, - Label::Distance not_data_object_distance) { - Label is_data_object; - movq(scratch, FieldOperand(value, HeapObject::kMapOffset)); - CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); - j(equal, &is_data_object, Label::kNear); - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - testb(FieldOperand(scratch, Map::kInstanceTypeOffset), - Immediate(kIsIndirectStringMask | kIsNotStringMask)); - j(not_zero, not_data_object, not_data_object_distance); - bind(&is_data_object); -} - - -void MacroAssembler::GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg) { - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); - movq(bitmap_reg, addr_reg); - // Sign extended 32 bit immediate. 
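CheckPageFlag, removed above, relies on every memory chunk being aligned to a fixed power-of-two size: masking any object address with ~kPageAlignmentMask lands on the chunk header, whose flags word can then be tested directly. A sketch of that lookup; the chunk layout and flag value are invented for the example (and std::aligned_alloc is C++17):

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <new>

constexpr uintptr_t kChunkSize      = 1u << 20;       // illustrative 1 MiB chunks
constexpr uintptr_t kChunkAlignMask = kChunkSize - 1;

struct MemoryChunk {      // header placed at the start of each chunk
  uintptr_t flags;
};

constexpr uintptr_t kPointersToHereAreInteresting = 1u << 0;   // made-up flag bit

// Find the chunk header from any interior address and test one flag,
// the way the generated and_/testl sequence does.
bool CheckPageFlag(uintptr_t any_address_in_chunk, uintptr_t mask) {
  auto* chunk = reinterpret_cast<MemoryChunk*>(any_address_in_chunk & ~kChunkAlignMask);
  return (chunk->flags & mask) != 0;
}

int main() {
  void* chunk = std::aligned_alloc(kChunkSize, kChunkSize);
  new (chunk) MemoryChunk{kPointersToHereAreInteresting};            // header at chunk start
  uintptr_t interior = reinterpret_cast<uintptr_t>(chunk) + 0x1234;  // an "object" inside
  printf("%d\n", CheckPageFlag(interior, kPointersToHereAreInteresting));  // 1
  std::free(chunk);
}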
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); - movq(rcx, addr_reg); - int shift = - Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; - shrl(rcx, Immediate(shift)); - and_(rcx, - Immediate((Page::kPageAlignmentMask >> shift) & - ~(Bitmap::kBytesPerCell - 1))); - - addq(bitmap_reg, rcx); - movq(rcx, addr_reg); - shrl(rcx, Immediate(kPointerSizeLog2)); - and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); - movl(mask_reg, Immediate(1)); - shl_cl(mask_reg); -} - - -void MacroAssembler::EnsureNotWhite( - Register value, - Register bitmap_scratch, - Register mask_scratch, - Label* value_is_white_and_not_data, - Label::Distance distance) { - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); - GetMarkBits(value, bitmap_scratch, mask_scratch); - - // If the value is black or grey we don't need to do anything. - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); - - Label done; - - // Since both black and grey have a 1 in the first position and white does - // not have a 1 there we only need to check one bit. - testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); - j(not_zero, &done, Label::kNear); - - if (FLAG_debug_code) { - // Check for impossible bit pattern. - Label ok; - push(mask_scratch); - // shl. May overflow making the check conservative. - addq(mask_scratch, mask_scratch); - testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); - j(zero, &ok, Label::kNear); - int3(); - bind(&ok); - pop(mask_scratch); - } - - // Value is white. We check whether it is data that doesn't need scanning. - // Currently only checks for HeapNumber and non-cons strings. - Register map = rcx; // Holds map while checking type. - Register length = rcx; // Holds length of object after checking type. - Label not_heap_number; - Label is_data_object; - - // Check for heap-number - movq(map, FieldOperand(value, HeapObject::kMapOffset)); - CompareRoot(map, Heap::kHeapNumberMapRootIndex); - j(not_equal, ¬_heap_number, Label::kNear); - movq(length, Immediate(HeapNumber::kSize)); - jmp(&is_data_object, Label::kNear); - - bind(¬_heap_number); - // Check for strings. - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); - // If it's a string and it's not a cons string then it's an object containing - // no GC pointers. - Register instance_type = rcx; - movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); - testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask)); - j(not_zero, value_is_white_and_not_data); - // It's a non-indirect (non-cons and non-slice) string. - // If it's external, the length is just ExternalString::kSize. - // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). - Label not_external; - // External strings are the only ones with the kExternalStringTag bit - // set. - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); - ASSERT_EQ(0, kConsStringTag & kExternalStringTag); - testb(instance_type, Immediate(kExternalStringTag)); - j(zero, ¬_external, Label::kNear); - movq(length, Immediate(ExternalString::kSize)); - jmp(&is_data_object, Label::kNear); - - bind(¬_external); - // Sequential string, either ASCII or UC16. 
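GetMarkBits, also dropped here, turns an object address into a (bitmap cell, bit mask) pair: the offset within the page, divided by the pointer size, gives a bit index, which is then split into a cell index and a bit position. The sketch below uses one mark bit per word to keep the arithmetic visible; the bitmap in the removed code actually tracks two bits per object for the white/grey/black colors:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageAlignMask   = (1u << 20) - 1;  // illustrative page size
constexpr unsigned  kPointerSizeLog2 = 3;               // 8-byte words
constexpr unsigned  kBitsPerCellLog2 = 6;               // 64-bit bitmap cells

struct MarkBits {
  size_t   cell;  // index of the 64-bit word inside the page's bitmap
  uint64_t mask;  // single bit selecting this object's mark bit
};

MarkBits GetMarkBits(uintptr_t addr) {
  uintptr_t bit_index = (addr & kPageAlignMask) >> kPointerSizeLog2;
  return { static_cast<size_t>(bit_index >> kBitsPerCellLog2),
           uint64_t{1} << (bit_index & ((1u << kBitsPerCellLog2) - 1)) };
}

int main() {
  MarkBits mb = GetMarkBits(0x10040);  // some word-aligned address
  printf("cell %zu mask %llx\n", mb.cell, (unsigned long long)mb.mask);  // cell 128 mask 100
}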
- ASSERT(kAsciiStringTag == 0x04); - and_(length, Immediate(kStringEncodingMask)); - xor_(length, Immediate(kStringEncodingMask)); - addq(length, Immediate(0x04)); - // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. - imul(length, FieldOperand(value, String::kLengthOffset)); - shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); - addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); - and_(length, Immediate(~kObjectAlignmentMask)); - - bind(&is_data_object); - // Value is a data object, and it is white. Mark it black. Since we know - // that the object is white we can make it black by flipping one bit. - or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); - - and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); - addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); - - bind(&done); -} - } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 7e0ba0054..e7eb104c0 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -29,7 +29,6 @@ #define V8_X64_MACRO_ASSEMBLER_X64_H_ #include "assembler.h" -#include "frames.h" #include "v8globals.h" namespace v8 { @@ -62,11 +61,6 @@ static const int kRootRegisterBias = 128; // Convenience for platform-independent signatures. typedef Operand MemOperand; -enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; - -bool AreAliased(Register r1, Register r2, Register r3, Register r4); - // Forward declaration. class JumpTarget; @@ -78,7 +72,6 @@ struct SmiIndex { ScaleFactor scale; }; - // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -141,145 +134,56 @@ class MacroAssembler: public Assembler { void CompareRoot(const Operand& with, Heap::RootListIndex index); void PushRoot(Heap::RootListIndex index); - // These functions do not arrange the registers in any particular order so - // they are not useful for calls that can cause a GC. The caller can - // exclude up to 3 registers that do not need to be saved and restored. - void PushCallerSaved(SaveFPRegsMode fp_mode, - Register exclusion1 = no_reg, - Register exclusion2 = no_reg, - Register exclusion3 = no_reg); - void PopCallerSaved(SaveFPRegsMode fp_mode, - Register exclusion1 = no_reg, - Register exclusion2 = no_reg, - Register exclusion3 = no_reg); - -// --------------------------------------------------------------------------- -// GC Support - - - enum RememberedSetFinalAction { - kReturnAtEnd, - kFallThroughAtEnd - }; - - // Record in the remembered set the fact that we have a pointer to new space - // at the address pointed to by the addr register. Only works if addr is not - // in new space. - void RememberedSetHelper(Register object, // Used for debug code. - Register addr, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetFinalAction and_then); - - void CheckPageFlag(Register object, - Register scratch, - int mask, - Condition cc, - Label* condition_met, - Label::Distance condition_met_distance = Label::kFar); - - // Check if object is in new space. Jumps if the object is not in new space. - // The register scratch can be object itself, but scratch will be clobbered. 
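The tail of EnsureNotWhite, removed above, computes a sequential string's size for the live-bytes counter: character size times length, plus the header, rounded up to the allocation alignment with the usual add-mask-then-clear idiom. That rounding idiom in isolation, with illustrative header and alignment constants:

#include <cstddef>
#include <cstdio>

constexpr size_t kObjectAlignment     = 8;   // illustrative
constexpr size_t kObjectAlignmentMask = kObjectAlignment - 1;
constexpr size_t kSeqStringHeaderSize = 16;  // illustrative

// (size + mask) & ~mask rounds any size up to the next multiple of the
// alignment -- the addq/and_ pair at the end of the removed code.
size_t RoundUpToAlignment(size_t size) {
  return (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}

size_t SeqStringSize(size_t length, size_t char_size) {
  return RoundUpToAlignment(kSeqStringHeaderSize + length * char_size);
}

int main() {
  printf("%zu %zu\n", SeqStringSize(5, 1), SeqStringSize(5, 2));  // 24 32
}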
- void JumpIfNotInNewSpace(Register object, - Register scratch, - Label* branch, - Label::Distance distance = Label::kFar) { - InNewSpace(object, scratch, not_equal, branch, distance); - } - - // Check if object is in new space. Jumps if the object is in new space. - // The register scratch can be object itself, but it will be clobbered. - void JumpIfInNewSpace(Register object, - Register scratch, - Label* branch, - Label::Distance distance = Label::kFar) { - InNewSpace(object, scratch, equal, branch, distance); - } - - // Check if an object has the black incremental marking color. Also uses rcx! - void JumpIfBlack(Register object, - Register scratch0, - Register scratch1, - Label* on_black, - Label::Distance on_black_distance = Label::kFar); - - // Detects conservatively whether an object is data-only, ie it does need to - // be scanned by the garbage collector. - void JumpIfDataObject(Register value, - Register scratch, - Label* not_data_object, - Label::Distance not_data_object_distance); - - // Checks the color of an object. If the object is already grey or black - // then we just fall through, since it is already live. If it is white and - // we can determine that it doesn't need to be scanned, then we just mark it - // black and fall through. For the rest we jump to the label so the - // incremental marker can fix its assumptions. - void EnsureNotWhite(Register object, - Register scratch1, - Register scratch2, - Label* object_is_white_and_not_data, - Label::Distance distance); - - // Notify the garbage collector that we wrote a pointer into an object. - // |object| is the object being stored into, |value| is the object being - // stored. value and scratch registers are clobbered by the operation. - // The offset is the offset from the start of the object, not the offset from - // the tagged HeapObject pointer. For use with FieldOperand(reg, off). - void RecordWriteField( - Register object, - int offset, - Register value, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); - - // As above, but the offset has the tag presubtracted. For use with - // Operand(reg, off). - void RecordWriteContextSlot( - Register context, - int offset, - Register value, - Register scratch, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK) { - RecordWriteField(context, - offset + kHeapObjectTag, - value, - scratch, - save_fp, - remembered_set_action, - smi_check); - } + // --------------------------------------------------------------------------- + // GC Support + + // For page containing |object| mark region covering |addr| dirty. + // RecordWriteHelper only works if the object is not in new + // space. + void RecordWriteHelper(Register object, + Register addr, + Register scratch); + + // Check if object is in new space. The condition cc can be equal or + // not_equal. If it is equal a jump will be done if the object is on new + // space. The register scratch can be object itself, but it will be clobbered. + void InNewSpace(Register object, + Register scratch, + Condition cc, + Label* branch, + Label::Distance near_jump = Label::kFar); - // Notify the garbage collector that we wrote a pointer into a fixed array. - // |array| is the array being stored into, |value| is the - // object being stored. |index| is the array index represented as a - // Smi. 
All registers are clobbered by the operation RecordWriteArray - // filters out smis so it does not update the write barrier if the - // value is a smi. - void RecordWriteArray( - Register array, - Register value, - Register index, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); - - // For page containing |object| mark region covering |address| + // For page containing |object| mark region covering [object+offset] + // dirty. |object| is the object being stored into, |value| is the + // object being stored. If |offset| is zero, then the |scratch| + // register contains the array index into the elements array + // represented as an untagged 32-bit integer. All registers are + // clobbered by the operation. RecordWrite filters out smis so it + // does not update the write barrier if the value is a smi. + void RecordWrite(Register object, + int offset, + Register value, + Register scratch); + + // For page containing |object| mark region covering [address] // dirty. |object| is the object being stored into, |value| is the - // object being stored. The address and value registers are clobbered by the + // object being stored. All registers are clobbered by the // operation. RecordWrite filters out smis so it does not update // the write barrier if the value is a smi. - void RecordWrite( - Register object, - Register address, - Register value, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + void RecordWrite(Register object, + Register address, + Register value); + + // For page containing |object| mark region covering [object+offset] dirty. + // The value is known to not be a smi. + // object is the object being stored into, value is the object being stored. + // If offset is zero, then the scratch register contains the array index into + // the elements array represented as an untagged 32-bit integer. + // All registers are clobbered by the operation. + void RecordWriteNonSmi(Register object, + int offset, + Register value, + Register scratch); #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- @@ -288,6 +192,15 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif + // --------------------------------------------------------------------------- + // Activation frames + + void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } + void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } + + void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } + void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } + // Enter specific kind of exit frame; either in normal or // debug mode. Expects the number of arguments in register rax and // sets up the number of arguments in register rdi and the pointer @@ -847,28 +760,6 @@ class MacroAssembler: public Assembler { Label* fail, Label::Distance distance = Label::kFar); - // Check if a map for a JSObject indicates that the object can have both smi - // and HeapObject elements. Jump to the specified label if it does not. - void CheckFastObjectElements(Register map, - Label* fail, - Label::Distance distance = Label::kFar); - - // Check if a map for a JSObject indicates that the object has fast smi only - // elements. Jump to the specified label if it does not. 
- void CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance = Label::kFar); - - // Check to see if maybe_number can be stored as a double in - // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements, otherwise jump to fail. - // Note that key must not be smi-tagged. - void StoreNumberToDoubleElements(Register maybe_number, - Register elements, - Register key, - XMMRegister xmm_scratch, - Label* fail); - // Check if the map of an object is equal to a specified map and // branch to label if not. Skip the smi check if not required // (object is known to be a heap object) @@ -1228,13 +1119,6 @@ class MacroAssembler: public Assembler { int min_length = 0, Register scratch = kScratchRegister); - // Initialize fields with filler values. Fields starting at |start_offset| - // not including end_offset are overwritten with the value in |filler|. At - // the end the loop, |start_offset| takes the value of |end_offset|. - void InitializeFieldsWithFiller(Register start_offset, - Register end_offset, - Register filler); - // --------------------------------------------------------------------------- // StatsCounter support @@ -1267,18 +1151,11 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } - void set_has_frame(bool value) { has_frame_ = value; } - bool has_frame() { return has_frame_; } - inline bool AllowThisStubCall(CodeStub* stub); static int SafepointRegisterStackIndex(Register reg) { return SafepointRegisterStackIndex(reg.code()); } - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - private: // Order general registers are pushed by Pushad. // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. @@ -1288,7 +1165,6 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; - bool has_frame_; bool root_array_available_; // Returns a register holding the smi value. The register MUST NOT be @@ -1312,6 +1188,10 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper = NullCallWrapper(), CallKind call_kind = CALL_AS_METHOD); + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + void EnterExitFramePrologue(bool save_rax); // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack @@ -1338,20 +1218,6 @@ class MacroAssembler: public Assembler { Register scratch, bool gc_allowed); - // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. - void InNewSpace(Register object, - Register scratch, - Condition cc, - Label* branch, - Label::Distance distance = Label::kFar); - - // Helper for finding the mark bits for an address. Afterwards, the - // bitmap register points at the word with the mark bits and the mask - // the position of the first bit. Uses rcx as scratch and leaves addr_reg - // unchanged. - inline void GetMarkBits(Register addr_reg, - Register bitmap_reg, - Register mask_reg); // Compute memory operands for safepoint stack slots. 
Operand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index 55fabc003..a782bd705 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -193,7 +193,7 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); + __ cmpb(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, ¬_at_start); // If we did, are we still at the start of the input? __ lea(rax, Operand(rsi, rdi, times_1, 0)); @@ -205,7 +205,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); + __ cmpb(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, on_not_at_start); // If we did, are we still at the start of the input? __ lea(rax, Operand(rsi, rdi, times_1, 0)); @@ -431,14 +431,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Isolate. __ LoadAddress(rcx, ExternalReference::isolate_address()); #endif - - { // NOLINT: Can't find a way to open this scope without confusing the - // linter. - AllowExternalCallThatCantCauseGC scope(&masm_); - ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate()); - __ CallCFunction(compare, num_arguments); - } + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate()); + __ CallCFunction(compare, num_arguments); // Restore original values before reacting on result value. __ Move(code_object_pointer(), masm_.CodeObject()); @@ -711,12 +706,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // registers we need. // Entry code: __ bind(&entry_label_); - - // Tell the system that we have a stack frame. Because the type is MANUAL, no - // is generated. - FrameScope scope(&masm_, StackFrame::MANUAL); - - // Actually emit code to start a new stack frame. + // Start new stack frame. __ push(rbp); __ movq(rbp, rsp); // Save parameters and callee-save registers. Order here should correspond diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index c4b2672f6..76d255579 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -645,7 +645,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { scratch1, scratch2, scratch3, name, miss_label); - FrameScope scope(masm, StackFrame::INTERNAL); + __ EnterInternalFrame(); // Save the name_ register across the call. __ push(name_); @@ -662,8 +662,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - - // Leave the internal frame. + __ LeaveInternalFrame(); } void LoadWithInterceptor(MacroAssembler* masm, @@ -671,21 +670,19 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder, JSObject* holder_obj, Label* interceptor_succeeded) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(holder); // Save the holder. - __ push(name_); // Save the name. - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. 
- __ pop(receiver); // Restore the holder. - // Leave the internal frame. - } + __ EnterInternalFrame(); + __ push(holder); // Save the holder. + __ push(name_); // Save the name. + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + __ LeaveInternalFrame(); __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); __ j(not_equal, interceptor_succeeded); @@ -784,8 +781,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. __ movq(name_reg, rax); - __ RecordWriteField( - receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs); + __ RecordWrite(receiver_reg, offset, name_reg, scratch); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -796,8 +792,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. __ movq(name_reg, rax); - __ RecordWriteField( - scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs); + __ RecordWrite(scratch, offset, name_reg, receiver_reg); } // Return the value (register rax). @@ -1144,43 +1139,41 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - { - FrameScope frame_scope(masm(), StackFrame::INTERNAL); - - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ push(receiver); - } - __ push(holder_reg); - __ push(name_reg); - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); - __ j(equal, &interceptor_failed); - frame_scope.GenerateLeaveFrame(); - __ ret(0); + __ EnterInternalFrame(); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ push(receiver); + } + __ push(holder_reg); + __ push(name_reg); + + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); + __ j(equal, &interceptor_failed); + __ LeaveInternalFrame(); + __ ret(0); - // Leave the internal frame. 
+ __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); } + __ LeaveInternalFrame(); + // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. if (interceptor_holder != lookup->holder()) { @@ -1428,7 +1421,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label attempt_to_grow_elements, with_write_barrier; + Label exit, with_write_barrier, attempt_to_grow_elements; // Get the array's length into rax and calculate new length. __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset)); @@ -1442,40 +1435,30 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmpl(rax, rcx); __ j(greater, &attempt_to_grow_elements); - // Check if value is a smi. - __ movq(rcx, Operand(rsp, argc * kPointerSize)); - __ JumpIfNotSmi(rcx, &with_write_barrier); - // Save new length. __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); // Push the element. + __ movq(rcx, Operand(rsp, argc * kPointerSize)); __ lea(rdx, FieldOperand(rbx, rax, times_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); __ movq(Operand(rdx, 0), rcx); + // Check if value is a smi. __ Integer32ToSmi(rax, rax); // Return new length as smi. - __ ret((argc + 1) * kPointerSize); - __ bind(&with_write_barrier); + __ JumpIfNotSmi(rcx, &with_write_barrier); - __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); - __ CheckFastObjectElements(rdi, &call_builtin); + __ bind(&exit); + __ ret((argc + 1) * kPointerSize); - // Save new length. - __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); + __ bind(&with_write_barrier); - // Push the element. - __ lea(rdx, FieldOperand(rbx, - rax, times_pointer_size, - FixedArray::kHeaderSize - argc * kPointerSize)); - __ movq(Operand(rdx, 0), rcx); + __ InNewSpace(rbx, rcx, equal, &exit); - __ RecordWrite( - rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ RecordWriteHelper(rbx, rdx, rcx); - __ Integer32ToSmi(rax, rax); // Return new length as smi. __ ret((argc + 1) * kPointerSize); __ bind(&attempt_to_grow_elements); @@ -1483,15 +1466,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ jmp(&call_builtin); } - __ movq(rdi, Operand(rsp, argc * kPointerSize)); - // Growing elements that are SMI-only requires special handling in case - // the new element is non-Smi. For now, delegate to the builtin. - Label no_fast_elements_check; - __ JumpIfSmi(rdi, &no_fast_elements_check); - __ movq(rsi, FieldOperand(rdx, HeapObject::kMapOffset)); - __ CheckFastObjectElements(rsi, &call_builtin, Label::kFar); - __ bind(&no_fast_elements_check); - ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = @@ -1515,22 +1489,16 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // We fit and could grow elements. __ Store(new_space_allocation_top, rcx); + __ movq(rcx, Operand(rsp, argc * kPointerSize)); // Push the argument... - __ movq(Operand(rdx, 0), rdi); + __ movq(Operand(rdx, 0), rcx); // ... and fill the rest with holes. 
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { __ movq(Operand(rdx, i * kPointerSize), kScratchRegister); } - // We know the elements array is in new space so we don't need the - // remembered set, but we just pushed a value onto it so we may have to - // tell the incremental marker to rescan the object that we just grew. We - // don't need to worry about the holes because they are in old space and - // already marked black. - __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET); - // Restore receiver to rdx as finish sequence assumes it's here. __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); @@ -1542,6 +1510,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ Integer32ToSmi(rax, rax); __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax); + // Elements are in new space, so write barrier is not required. __ ret((argc + 1) * kPointerSize); } @@ -2494,36 +2463,19 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, Handle<Map>(object->map())); __ j(not_equal, &miss); - // Compute the cell operand to use. - __ Move(rbx, Handle<JSGlobalPropertyCell>(cell)); - Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset); - // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs // to update the property details in the property dictionary of the // global object. We bail out to the runtime system to do that. - __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex); + __ Move(rbx, Handle<JSGlobalPropertyCell>(cell)); + __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), + Heap::kTheHoleValueRootIndex); __ j(equal, &miss); // Store the value in the cell. - __ movq(cell_operand, rax); - Label done; - __ JumpIfSmi(rax, &done); - - __ movq(rcx, rax); - __ lea(rdx, cell_operand); - // Cells are always in the remembered set. - __ RecordWrite(rbx, // Object. - rdx, // Address. - rcx, // Value. - kDontSaveFPRegs, - OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - + __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax); // Return the value (register rax). 
- __ bind(&done); - Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1); __ ret(0); @@ -2603,10 +2555,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( MapList* receiver_maps, - CodeList* handler_stubs, - MapList* transitioned_maps) { + CodeList* handler_ics) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -2614,25 +2565,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( // -- rsp[0] : return address // ----------------------------------- Label miss; - __ JumpIfSmi(rdx, &miss, Label::kNear); + __ JumpIfSmi(rdx, &miss); - __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); + Register map_reg = rbx; + __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset)); int receiver_count = receiver_maps->length(); - for (int i = 0; i < receiver_count; ++i) { + for (int current = 0; current < receiver_count; ++current) { // Check map and tail call if there's a match - Handle<Map> map(receiver_maps->at(i)); - __ Cmp(rdi, map); - if (transitioned_maps->at(i) == NULL) { - __ j(equal, Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET); - } else { - Label next_map; - __ j(not_equal, &next_map, Label::kNear); - __ movq(rbx, - Handle<Map>(transitioned_maps->at(i)), - RelocInfo::EMBEDDED_OBJECT); - __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET); - __ bind(&next_map); - } + Handle<Map> map(receiver_maps->at(current)); + __ Cmp(map_reg, map); + __ j(equal, + Handle<Code>(handler_ics->at(current)), + RelocInfo::CODE_TARGET); } __ bind(&miss); @@ -3068,7 +3012,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3492,7 +3436,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ movsd(Operand(rbx, rdi, times_8, 0), xmm0); break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3560,7 +3503,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3692,17 +3634,15 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement( - MacroAssembler* masm, - bool is_js_array, - ElementsKind elements_kind) { +void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, + bool is_js_array) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. 
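Throughout stub-cache-x64.cc this patch also swaps the scope-based frame management (FrameScope scope(masm, StackFrame::INTERNAL); with the frame closed where the scope ends) back to explicit EnterInternalFrame()/LeaveInternalFrame() pairs, matching the helpers restored in the macro-assembler-x64.h hunk above. A minimal sketch of the two styles, using a stand-in MacroAssembler rather than V8's real class:

#include <cstdio>

struct MacroAssembler {
  void EnterInternalFrame() { std::puts("enter internal frame"); }
  void LeaveInternalFrame() { std::puts("leave internal frame"); }
};

// Scope-based variant: the Leave is emitted when the C++ scope ends.
class InternalFrameScope {
 public:
  explicit InternalFrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~InternalFrameScope() { masm_->LeaveInternalFrame(); }
 private:
  MacroAssembler* masm_;
};

int main() {
  MacroAssembler masm;

  // Explicit pairing, as in the restored 3.6.4 stubs: every path that
  // leaves the frame must call LeaveInternalFrame() itself.
  masm.EnterInternalFrame();
  masm.LeaveInternalFrame();

  // Scope-based pairing, as in the code being removed.
  {
    InternalFrameScope scope(&masm);
    // ... code emitted inside the frame ...
  }
  return 0;
}

The scope-based form ties the Leave to C++ scope exit; the explicit form restored here requires each early exit, such as the interceptor fast path above, to emit its own LeaveInternalFrame() before returning.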
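The other recurring change in this file is the write barrier: calls to the 3.7-style RecordWriteField / RecordWrite(object, address, value, SaveFPRegsMode, ...) are replaced by the older RecordWrite(object, offset, value, scratch), RecordWriteHelper and InNewSpace helpers declared in the macro-assembler hunk above. Both variants answer the same question for the garbage collector: which old-space slots now hold pointers into new space. A simplified, V8-independent sketch of that bookkeeping, using a remembered set of slots — all names here are made up, and the assembly in this patch records the information differently (dirty-region marking plus smi filtering):

#include <set>

struct ToyObject {
  bool in_new_space;
  ToyObject* field;
};

struct ToyHeap {
  // Slots in old-space objects that now point into new space.
  std::set<ToyObject**> remembered_set;

  // Store value into *slot, a field of holder, and record the slot if the
  // store created an old->new reference (cf. the smi filtering in RecordWrite:
  // immediates need no entry because they are not heap pointers).
  void WriteField(ToyObject* holder, ToyObject** slot, ToyObject* value) {
    *slot = value;
    if (value == nullptr) return;
    if (!holder->in_new_space && value->in_new_space) {
      remembered_set.insert(slot);
    }
  }
};

int main() {
  ToyHeap heap;
  ToyObject old_obj{false, nullptr};
  ToyObject young_obj{true, nullptr};

  // A minor GC can now find young_obj via heap.remembered_set instead of
  // scanning all of old space.
  heap.WriteField(&old_obj, &old_obj.field, &young_obj);
  return heap.remembered_set.size() == 1 ? 0 : 1;
}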
@@ -3725,22 +3665,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ j(above_equal, &miss_force_generic); } - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { - __ JumpIfNotSmi(rax, &transition_elements_kind); - __ SmiToInteger32(rcx, rcx); - __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize), - rax); - } else { - // Do the store and update the write barrier. - ASSERT(elements_kind == FAST_ELEMENTS); - __ SmiToInteger32(rcx, rcx); - __ lea(rcx, - FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize)); - __ movq(Operand(rcx, 0), rax); - // Make sure to preserve the value in register rax. - __ movq(rdx, rax); - __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs); - } + // Do the store and update the write barrier. Make sure to preserve + // the value in register eax. + __ movq(rdx, rax); + __ SmiToInteger32(rcx, rcx); + __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize), + rax); + __ RecordWrite(rdi, 0, rdx, rcx); // Done. __ ret(0); @@ -3750,10 +3681,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); - - __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); } @@ -3766,7 +3693,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label miss_force_generic, transition_elements_kind; + Label miss_force_generic, smi_value, is_nan, maybe_nan; + Label have_double_value, not_nan; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3787,9 +3715,50 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ j(above_equal, &miss_force_generic); // Handle smi values specially + __ JumpIfSmi(rax, &smi_value, Label::kNear); + + __ CheckMap(rax, + masm->isolate()->factory()->heap_number_map(), + &miss_force_generic, + DONT_DO_SMI_CHECK); + + // Double value, canonicalize NaN. + uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); + __ cmpl(FieldOperand(rax, offset), + Immediate(kNaNOrInfinityLowerBoundUpper32)); + __ j(greater_equal, &maybe_nan, Label::kNear); + + __ bind(¬_nan); + __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset)); + __ bind(&have_double_value); + __ SmiToInteger32(rcx, rcx); + __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize), + xmm0); + __ ret(0); + + __ bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + __ j(greater, &is_nan, Label::kNear); + __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0)); + __ j(zero, ¬_nan); + __ bind(&is_nan); + // Convert all NaNs to the same canonical NaN value when they are stored in + // the double array. + __ Set(kScratchRegister, BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double())); + __ movq(xmm0, kScratchRegister); + __ jmp(&have_double_value, Label::kNear); + + __ bind(&smi_value); + // Value is a smi. convert to a double and store. + // Preserve original value. 
+ __ SmiToInteger32(rdx, rax); + __ push(rdx); + __ fild_s(Operand(rsp, 0)); + __ pop(rdx); __ SmiToInteger32(rcx, rcx); - __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0, - &transition_elements_kind); + __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize)); __ ret(0); // Handle store cache miss, replacing the ic with the generic stub. @@ -3797,12 +3766,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); - - __ bind(&transition_elements_kind); - // Restore smi-tagging of rcx. - __ Integer32ToSmi(rcx, rcx); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); } |
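The restored GenerateStoreFastDoubleElement path above inspects the upper 32 bits of the incoming heap number (the kNaNOrInfinityLowerBoundUpper32 check) and stores FixedDoubleArray::canonical_not_the_hole_nan_as_double() whenever the value is any kind of NaN, so that reserved NaN payloads such as the hole marker (see kHoleNanLower32) cannot be produced by a user store. The same idea at the C++ level, as a standalone sketch — the canonical bit pattern below is an example quiet NaN, not necessarily the constant V8 uses:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static double CanonicalizeNaN(double value) {
  if (std::isnan(value)) {
    // Collapse every NaN payload (including signalling NaNs) to one pattern,
    // leaving other payloads free to act as in-band markers like "the hole".
    const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;
    std::memcpy(&value, &kCanonicalNaNBits, sizeof(value));
  }
  return value;
}

int main() {
  // Build a NaN with an arbitrary payload, as user code could.
  uint64_t bits = 0x7FF0000000000001ULL;
  double weird_nan;
  std::memcpy(&weird_nan, &bits, sizeof(weird_nan));

  double stored = CanonicalizeNaN(weird_nan);
  std::memcpy(&bits, &stored, sizeof(bits));
  std::printf("stored bits: %016llx\n", static_cast<unsigned long long>(bits));
  return 0;
}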
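These stubs also lean heavily on smi tagging (JumpIfSmi, Integer32ToSmi, SmiToInteger32, Integer32ToSmiField). A rough standalone sketch of the tagging scheme they assume on 64-bit — payload in the upper 32 bits, low bit 0 for smis and 1 for heap-object pointers — with the caveat that the exact shift and tag values here are assumptions for illustration:

#include <cassert>
#include <cstdint>

constexpr uint64_t kSmiTagMask = 1;   // low bit: 0 = smi, 1 = heap object (assumed)
constexpr int kSmiShift = 32;         // payload lives in the upper 32 bits (assumed)

inline bool IsSmi(uint64_t tagged) { return (tagged & kSmiTagMask) == 0; }

inline uint64_t Integer32ToSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
}

inline int32_t SmiToInteger32(uint64_t tagged) {
  assert(IsSmi(tagged));
  return static_cast<int32_t>(tagged >> kSmiShift);
}

int main() {
  uint64_t tagged = Integer32ToSmi(-7);
  assert(IsSmi(tagged));
  assert(SmiToInteger32(tagged) == -7);
  return 0;
}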