author     Peter Varga <pvarga@inf.u-szeged.hu>      2012-06-13 09:21:17 +0200
committer  Qt by Nokia <qt-info@nokia.com>           2012-06-13 09:55:50 +0200
commit     d47c4a1dad750d278bcf00b630af69c4b8bdb97f (patch)
tree       289865bd448633ffec5e8d7e85436b6c0a8a6cad /src/3rdparty/v8/src/x64
parent     28017b45db578180d0d54e67aea209ad95f73b9f (diff)
download   qtjsbackend-d47c4a1dad750d278bcf00b630af69c4b8bdb97f.tar.gz
Updated V8 from git://github.com/v8/v8.git to 3e6ec7e018bbf2c63ef04b85ff688198ea204c04
Update V8 source to version 3.11.4
* Performance and stability improvements on all platforms.
* Fixed native ARM build (issues 1744, 539).
* Fixed several bugs in heap profiles (including issue 2078).
* Throw syntax errors on illegal escape sequences.
* Made handling of const more consistent when combined with 'eval' and
'with'.
* Fixed V8 on MinGW-x64 (issue 2026).
* Put new global var semantics behind a flag until WebKit tests are
cleaned up.
* Enabled inlining some V8 API functions.
* Enabled MIPS cross-compilation.
* Implemented clearing of CompareICs (issue 2102; see the opcode sketch after this message).
* Fixed Python deprecations (issue 1391).
* Fixed GCC 4.7 (C++11) compilation (issue 2136).
Change-Id: I72594bd22356391dd55e315c022d0c9f3fd5b451
Reviewed-by: Kent Hansen <kent.hansen@nokia.com>
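
As background for the CompareIC item above: on x64 a short conditional jump is one opcode byte (0x70 | condition code) followed by a signed 8-bit displacement, so enabling or disabling the inlined smi check (see the assembler-x64.h and ic-x64.cc hunks below) is a single byte write. The following is a minimal standalone C++ sketch of that opcode arithmetic; the names mirror V8's but this is illustrative code, not V8 source.

#include <cstdint>
#include <cassert>

// x86 condition codes (same numeric values V8's assembler uses).
enum Condition : uint8_t {
  carry     = 2,
  not_carry = 3,
  zero      = 4,   // "equal"
  not_zero  = 5    // "not equal"
};

// One opcode byte (0x70 | cc) plus an 8-bit displacement.
const uint8_t kJccShortPrefix = 0x70;
const uint8_t kJncShortOpcode = kJccShortPrefix | not_carry;  // 0x73
const uint8_t kJcShortOpcode  = kJccShortPrefix | carry;      // 0x72
const uint8_t kJnzShortOpcode = kJccShortPrefix | not_zero;   // 0x75
const uint8_t kJzShortOpcode  = kJccShortPrefix | zero;       // 0x74

enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };

// Toggle the jump in place: enabling swaps jc/jnc for jz/jnz, disabling
// swaps back -- the same single-byte patch PatchInlinedSmiCode performs.
void PatchShortJump(uint8_t* jmp_address, InlinedSmiCheck check) {
  uint8_t cc;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    assert(*jmp_address == kJncShortOpcode || *jmp_address == kJcShortOpcode);
    cc = (*jmp_address == kJncShortOpcode) ? not_zero : zero;
  } else {
    assert(*jmp_address == kJnzShortOpcode || *jmp_address == kJzShortOpcode);
    cc = (*jmp_address == kJnzShortOpcode) ? not_carry : carry;
  }
  *jmp_address = static_cast<uint8_t>(kJccShortPrefix | cc);
}

int main() {
  uint8_t code[] = { kJncShortOpcode, 0x10 };        // jnc +0x10
  PatchShortJump(code, ENABLE_INLINED_SMI_CHECK);    // now jnz +0x10
  assert(code[0] == kJnzShortOpcode);
  PatchShortJump(code, DISABLE_INLINED_SMI_CHECK);   // back to jnc
  assert(code[0] == kJncShortOpcode);
  return 0;
}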
Diffstat (limited to 'src/3rdparty/v8/src/x64')
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.h           3
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc       115
-rw-r--r--  src/3rdparty/v8/src/x64/debug-x64.cc             15
-rw-r--r--  src/3rdparty/v8/src/x64/deoptimizer-x64.cc       16
-rw-r--r--  src/3rdparty/v8/src/x64/full-codegen-x64.cc     336
-rw-r--r--  src/3rdparty/v8/src/x64/ic-x64.cc                22
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.cc  296
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.h    11
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.cc           78
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.h            83
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.cc   16
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.h     7
-rw-r--r--  src/3rdparty/v8/src/x64/stub-cache-x64.cc       122
13 files changed, 617 insertions, 503 deletions
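
Much of the lithium churn below threads a new additional_index argument through BuildFastArrayOperand, folding the constant part of a dehoisted array index into the operand displacement. A small self-contained C++ sketch of that displacement arithmetic follows; the shift and header constants are illustrative stand-ins, not the real header values.

#include <cstdint>
#include <cstdio>

// Effective address of a keyed access: base + (key << shift) + displacement.
//   constant key:  disp = ((constant + additional_index) << shift) + offset
//   register key:  disp = offset + (additional_index << shift)
uint32_t DisplacementForConstantKey(uint32_t constant_value,
                                    uint32_t additional_index,
                                    int shift_size,
                                    uint32_t offset) {
  return ((constant_value + additional_index) << shift_size) + offset;
}

uint32_t DisplacementForRegisterKey(uint32_t additional_index,
                                    int shift_size,
                                    uint32_t offset) {
  return offset + (additional_index << shift_size);
}

int main() {
  const int kShift = 3;        // FAST_ELEMENTS on x64: 8-byte slots
  const uint32_t kHeader = 8;  // stand-in for the untagged header size
  // a[i + 2] with i known to be 5 at compile time: whole address is constant.
  printf("constant key: disp = %u\n",
         DisplacementForConstantKey(5, 2, kShift, kHeader));  // (5+2)*8 + 8
  // a[i + 2] with i in a register: key scales at runtime, the +2 is folded.
  printf("register key: disp = %u\n",
         DisplacementForRegisterKey(2, kShift, kHeader));     // 8 + 2*8
  return 0;
}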
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
index 60b29e6..9f5f850 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/assembler-x64.h
@@ -629,7 +629,8 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
   // ---------------------------------------------------------------------------
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
index 63c44e9..d179d2a 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
@@ -139,10 +139,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
 
-  // Copy the qmlglobal object from the previous context.
-  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-  __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);
-
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -207,10 +203,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
   __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
 
-  // Copy the qmlglobal object from the previous context.
-  __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_INDEX));
-  __ movq(ContextOperand(rax, Context::QML_GLOBAL_INDEX), rbx);
-
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
   for (int i = 0; i < slots_; i++) {
@@ -3328,37 +3320,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  {
-    Label not_user_equal, user_equal;
-    __ JumpIfSmi(rax, &not_user_equal);
-    __ JumpIfSmi(rdx, &not_user_equal);
-
-    __ CmpObjectType(rax, JS_OBJECT_TYPE, rbx);
-    __ j(not_equal, &not_user_equal);
-
-    __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
-    __ j(not_equal, &not_user_equal);
-
-    __ testb(FieldOperand(rbx, Map::kBitField2Offset),
-             Immediate(1 << Map::kUseUserObjectComparison));
-    __ j(not_zero, &user_equal);
-    __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-             Immediate(1 << Map::kUseUserObjectComparison));
-    __ j(not_zero, &user_equal);
-
-    __ jmp(&not_user_equal);
-
-    __ bind(&user_equal);
-
-    __ pop(rbx);  // Return address.
-    __ push(rax);
-    __ push(rdx);
-    __ push(rbx);
-    __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
-    __ bind(&not_user_equal);
-  }
-
   // Two identical objects are equal unless they are both NaN or undefined.
   {
     Label not_identical;
@@ -3667,8 +3628,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // rdi : the function to call
   // rbx : cache cell for call target
+  // rdi : the function to call
+  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -3683,9 +3645,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
     __ j(not_equal, &call, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
-    __ movq(rbx, GlobalObjectOperand());
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+    __ movq(rcx, GlobalObjectOperand());
+    __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
     __ bind(&call);
   }
@@ -3695,6 +3657,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &slow);
 
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
@@ -3717,6 +3683,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case. MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+            TypeFeedbackCells::MegamorphicSentinel(isolate));
+  }
   // Check for function proxy.
   __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
@@ -5151,56 +5124,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // rax: string
   // rbx: instance type
 
   // Calculate length of sub string using the smi values.
-  Label result_longer_than_two;
   __ movq(rcx, Operand(rsp, kToOffset));
   __ movq(rdx, Operand(rsp, kFromOffset));
   __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
 
   __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
-  __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+  __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
   Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
+  // Shorter than original string's length: an actual substring.
+  __ j(below, &not_original_string, Label::kNear);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ j(above, &runtime);
+  // Return original string.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
   __ bind(&not_original_string);
-  // Special handling of sub-strings of length 1 and 2. One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache.
   __ SmiToInteger32(rcx, rcx);
-  __ cmpl(rcx, Immediate(2));
-  __ j(greater, &result_longer_than_two);
-  __ j(less, &runtime);
-
-  // Sub string of length 2 requested.
-  // rax: string
-  // rbx: instance type
-  // rcx: sub string length (value is 2)
-  // rdx: from index (smi)
-  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
-
-  // Get the two characters forming the sub string.
-  __ SmiToInteger32(rdx, rdx);  // From index is no longer smi.
-  __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ movzxbq(rdi,
-             FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&make_two_character_string);
-  // Set up registers for allocating the two character string.
-  __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
-  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&result_longer_than_two);
   // rax: string
   // rbx: instance type
   // rcx: sub string length
@@ -5756,14 +5697,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ subq(rax, rdx);
@@ -5783,14 +5718,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ Cmp(rcx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
   __ Cmp(rbx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rbx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
 
   __ subq(rax, rdx);
   __ ret(0);
diff --git a/src/3rdparty/v8/src/x64/debug-x64.cc b/src/3rdparty/v8/src/x64/debug-x64.cc
index eec83d9..1b29e58 100644
--- a/src/3rdparty/v8/src/x64/debug-x64.cc
+++ b/src/3rdparty/v8/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,6 +91,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = true;
+
 
 #define __ ACCESS_MASM(masm)
 
@@ -103,6 +105,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+      __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
+    }
+    __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as as two smis causing it to be untouched by GC.
@@ -157,6 +165,11 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       }
     }
 
+    // Read current padding counter and skip corresponding number of words.
+    __ pop(kScratchRegister);
+    __ SmiToInteger32(kScratchRegister, kScratchRegister);
+    __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+
     // Get rid of the internal frame.
   }
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
index 40b9a1c..f3046b9 100644
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
@@ -458,6 +458,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
@@ -465,7 +467,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = 6 * kPointerSize;
+  unsigned fixed_frame_size = 7 * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -534,6 +536,16 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
+  // The output frame reflects a JSConstructStubGeneric frame.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(construct_stub);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; code object\n",
+           top_address + output_offset, output_offset, value);
+  }
+
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -557,8 +569,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
 
   ASSERT(0 == output_offset);
 
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   intptr_t pc = reinterpret_cast<intptr_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
index a0218f5..974269e 100644
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
@@ -171,13 +171,12 @@ void FullCodeGenerator::Generate() {
   // Possibly allocate a local context.
   int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
     __ push(rdi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -258,11 +257,11 @@ void FullCodeGenerator::Generate() {
 
   // For named function expressions, declare the function name as a
   // constant.
   if (scope()->is_function_scope() && scope()->function() != NULL) {
-    VariableProxy* proxy = scope()->function();
-    ASSERT(proxy->var()->mode() == CONST ||
-           proxy->var()->mode() == CONST_HARMONY);
-    ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-    EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+    VariableDeclaration* function = scope()->function();
+    ASSERT(function->proxy()->var()->mode() == CONST ||
+           function->proxy()->var()->mode() == CONST_HARMONY);
+    ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+    VisitVariableDeclaration(function);
   }
   VisitDeclarations(scope()->declarations());
 }
@@ -754,61 +753,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+    __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+    __ Check(not_equal, "Declaration in with context.");
+    __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+    __ Check(not_equal, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value());
       break;
 
     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ movq(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(StackOperand(variable), kScratchRegister);
       }
       break;
 
     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
-        __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
-        __ Check(not_equal, "Declaration in with context.");
-        __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
-        __ Check(not_equal, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ movq(ContextOperand(rsi, variable->index()), result_register());
-        int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(rsi,
-                                  offset,
-                                  result_register(),
-                                  rcx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
         // No write barrier since the hole value is in old space.
@@ -817,14 +806,12 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;
 
     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ push(rsi);
       __ Push(variable->name());
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr =
           (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
@@ -832,9 +819,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
        __ PushRoot(Heap::kTheHoleValueRootIndex);
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
@@ -846,6 +831,119 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
 
 
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ movq(StackOperand(variable), result_register());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ movq(ContextOperand(rsi, variable->index()), result_register());
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(rsi,
+                                offset,
+                                result_register(),
+                                rcx,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ push(rsi);
+      __ Push(variable->name());
+      __ Push(Smi::FromInt(NONE));
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name());
+      globals_->Add(instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ Move(ContextOperand(rsi, variable->index()), instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
@@ -1200,7 +1298,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+  __ movq(rax, GlobalObjectOperand());
   __ Move(rcx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1285,7 +1383,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       // Use inline caching. Variable name is passed in rcx and the global
      // object on the stack.
       __ Move(rcx, var->name());
-      __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+      __ movq(rax, GlobalObjectOperand());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(rax);
@@ -1912,7 +2010,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ Move(rcx, var->name());
-    __ movq(rdx, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ movq(rdx, GlobalObjectOperand());
     Handle<Code> ic = is_classic_mode() ?
        isolate()->builtins()->StoreIC_Initialize() :
        isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2175,6 +2273,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ Move(rbx, cell);
+  }
+
   CallFunctionStub stub(arg_count, flags);
   __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
@@ -2203,11 +2313,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push the start position of the scope the calls resides in.
   __ Push(Smi::FromInt(scope()->start_position()));
 
-  // Push the qml mode flag
-  __ Push(Smi::FromInt(is_qml_mode()));
-
   // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2260,7 +2367,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Call to a global variable. Push global object as receiver for the
     // call IC lookup.
-    __ push(proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ push(GlobalObjectOperand());
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     // Call to a lookup slot (dynamically introduced variable).
@@ -3253,102 +3360,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = rax;
-  Register index_1 = rbx;
-  Register index_2 = rcx;
-  Register elements = rdi;
-  Register temp = rdx;
-  __ movq(object, Operand(rsp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
-  __ j(not_equal, &slow_case);
-  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ j(not_zero, &slow_case);
-
-  // Check the object's elements are in fast case and writable.
-  __ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow_case);
-
-  // Check that both indices are smis.
-  __ movq(index_1, Operand(rsp, 1 * kPointerSize));
-  __ movq(index_2, Operand(rsp, 0 * kPointerSize));
-  __ JumpIfNotBothSmi(index_1, index_2, &slow_case);
-
-  // Check that both indices are valid.
-  // The JSArray length field is a smi since the array is in fast case mode.
-  __ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ SmiCompare(temp, index_1);
-  __ j(below_equal, &slow_case);
-  __ SmiCompare(temp, index_2);
-  __ j(below_equal, &slow_case);
-
-  __ SmiToInteger32(index_1, index_1);
-  __ SmiToInteger32(index_2, index_2);
-  // Bring addresses into index1 and index2.
-  __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Swap elements. Use object and temp as scratch registers.
-  __ movq(object, Operand(index_1, 0));
-  __ movq(temp, Operand(index_2, 0));
-  __ movq(Operand(index_2, 0), object);
-  __ movq(Operand(index_1, 0), temp);
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object. Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-
-  // We are done. Drop elements from the stack, and return undefined.
-  __ addq(rsp, Immediate(3 * kPointerSize));
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
@@ -3830,7 +3841,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         // but "delete this" is allowed.
         ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
-          __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+          __ push(GlobalObjectOperand());
           __ Push(var->name());
           __ Push(Smi::FromInt(kNonStrictMode));
           __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -4152,7 +4163,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
     __ Move(rcx, proxy->name());
-    __ movq(rax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ movq(rax, GlobalObjectOperand());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
@@ -4416,7 +4427,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code. Pass a smi sentinel and let the runtime look up the empty
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
index 0632ce4..6ba5fb6 100644
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ b/src/3rdparty/v8/src/x64/ic-x64.cc
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,14 +1766,18 @@ void PatchInlinedSmiCode(Address address) {
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
index dc15213..d1cf338 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
@@ -184,13 +184,12 @@ bool LCodeGen::GeneratePrologue() {
 
   // Possibly allocate a local context.
   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
     __ push(rdi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2017,8 +2016,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                  RECORD_SAFEPOINT_WITH_REGISTERS,
                                  2);
   ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Move result to a register that survives the end of the
   // PushSafepointRegisterScope.
@@ -2225,41 +2223,35 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
-  Handle<String> name = instr->hydrogen()->name();
+  bool need_generic = instr->hydrogen()->need_generic();
 
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ Move(rcx, instr->hydrogen()->name());
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+  Handle<String> name = instr->hydrogen()->name();
+  Label done;
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (last && !need_generic) {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ j(not_equal, &generic, Label::kNear);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ jmp(&done, Label::kNear);
-      __ bind(&generic);
-      __ Move(rcx, instr->hydrogen()->name());
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ Move(rcx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
@@ -2377,11 +2369,20 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   Register result = ToRegister(instr->result());
 
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits.
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   // Load the result.
   __ movq(result,
-          BuildFastArrayOperand(instr->elements(), instr->key(),
+          BuildFastArrayOperand(instr->elements(),
+                                instr->key(),
                                 FAST_ELEMENTS,
-                                FixedArray::kHeaderSize - kHeapObjectTag));
+                                FixedArray::kHeaderSize - kHeapObjectTag,
+                                instr->additional_index()));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2395,19 +2396,30 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
       sizeof(kHoleNanLower32);
   Operand hole_check_operand = BuildFastArrayOperand(
       instr->elements(),
       instr->key(),
       FAST_DOUBLE_ELEMENTS,
-      offset);
+      offset,
+      instr->additional_index());
   __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
   DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
   __ movsd(result, double_load_operand);
 }
 
 
@@ -2416,7 +2428,8 @@ Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
    ElementsKind elements_kind,
-    uint32_t offset) {
+    uint32_t offset,
+    uint32_t additional_index) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
   int shift_size = ElementsKindToShiftSize(elements_kind);
   if (key->IsConstantOperand()) {
@@ -2425,11 +2438,14 @@ Operand LCodeGen::BuildFastArrayOperand(
       Abort("array index constant value too big");
     }
     return Operand(elements_pointer_reg,
-                   constant_value * (1 << shift_size) + offset);
+                   ((constant_value + additional_index) << shift_size) +
+                       offset);
   } else {
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg, ToRegister(key),
-                   scale_factor, offset);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   offset + (additional_index << shift_size));
   }
 }
 
 
@@ -2438,7 +2454,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
     __ movss(result, operand);
@@ -2498,24 +2524,28 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
-  // Check for arguments adapter frame.
-  Label done, adapted;
-  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adapted, Label::kNear);
-
-  // No arguments adaptor frame.
-  __ movq(result, rbp);
-  __ jmp(&done, Label::kNear);
+  if (instr->hydrogen()->from_inlined()) {
+    __ lea(result, Operand(rsp, -2 * kPointerSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ movq(result, rbp);
+    __ jmp(&done, Label::kNear);
 
-  // Arguments adaptor frame present.
-  __ bind(&adapted);
-  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
 
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ bind(&done);
+    // Result is the frame pointer for the frame if not adapted and for the
+    // real frame below the adaptor frame if adapted.
+    __ bind(&done);
+  }
 }
 
 
@@ -2623,7 +2653,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2641,6 +2671,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2671,7 +2706,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, instr->qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+  __ movq(result, GlobalObjectOperand());
 }
 
 
@@ -2685,7 +2720,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 RDIState rdi_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -2693,7 +2729,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    __ LoadHeapObject(rdi, function);
+    if (rdi_state == RDI_UNINITIALIZED) {
+      __ LoadHeapObject(rdi, function);
+    }
 
     // Change context if needed.
     bool change_context =
@@ -2738,7 +2776,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD);
+                    CALL_AS_METHOD,
+                    RDI_UNINITIALIZED);
 }
 
 
@@ -3175,13 +3214,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(rdi));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      RDI_CONTAINS_TARGET);
+  }
 }
 
 
@@ -3235,7 +3282,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    RDI_UNINITIALIZED);
 }
 
 
@@ -3315,7 +3366,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister value(ToDoubleRegister(instr->value()));
     __ cvtsd2ss(value, value);
@@ -3385,30 +3447,29 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
-  // Do the store.
-  if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(FieldOperand(elements, offset), value);
-  } else {
-    __ movq(FieldOperand(elements,
-                         key,
-                         times_pointer_size,
-                         FixedArray::kHeaderSize),
-            value);
+  Operand operand =
+      BuildFastArrayOperand(instr->object(),
+                            instr->key(),
+                            FAST_ELEMENTS,
+                            FixedArray::kHeaderSize - kHeapObjectTag,
+                            instr->additional_index());
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
   }
 
+  __ movq(operand, value);
+
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ lea(key, FieldOperand(elements,
-                             key,
-                             times_pointer_size,
-                             FixedArray::kHeaderSize));
+    __ lea(key, operand);
     __ RecordWrite(elements,
                    key,
                    value,
@@ -3422,19 +3483,34 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
 void LCodeGen::DoStoreKeyedFastDoubleElement(
     LStoreKeyedFastDoubleElement* instr) {
   XMMRegister value = ToDoubleRegister(instr->value());
-  Label have_value;
 
-  __ ucomisd(value, value);
-  __ j(parity_odd, &have_value);  // NaN.
+  if (instr->NeedsCanonicalization()) {
+    Label have_value;
+
+    __ ucomisd(value, value);
+    __ j(parity_odd, &have_value);  // NaN.
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(value, kScratchRegister);
+    __ Set(kScratchRegister, BitCast<uint64_t>(
+        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+    __ movq(value, kScratchRegister);
+
+    __ bind(&have_value);
+  }
 
-  __ bind(&have_value);
   Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   __ movsd(double_store_operand, value);
 }
 
@@ -4268,9 +4344,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
       __ movq(FieldOperand(result, total_offset), rcx);
     }
   } else if (elements->IsFixedArray()) {
+    Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
     for (int i = 0; i < elements_length; i++) {
       int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-      Handle<Object> value = JSObject::GetElement(object, i);
+      Handle<Object> value(fast_elements->get(i));
       if (value->IsJSObject()) {
         Handle<JSObject> value_object = Handle<JSObject>::cast(value);
         __ lea(rcx, Operand(result, *offset));
@@ -4294,6 +4371,23 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the literal boilerplate ElementsKind is of a type different than
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+    __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(rcx, Immediate(Map::kElementsKindMask));
+    __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
+                           Map::kElementsKindShift));
+    DeoptimizeIf(not_equal, instr->environment());
+  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
@@ -4592,7 +4686,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* key = instr->key();
   EmitPushTaggedOperand(obj);
   EmitPushTaggedOperand(key);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   // Create safepoint generator that will also ensure enough space in the
@@ -4610,7 +4704,7 @@ void LCodeGen::DoIn(LIn* instr) {
   LOperand* key = instr->key();
   EmitPushTaggedOperand(key);
   EmitPushTaggedOperand(obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
index f5045b6..73e1a9b 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
@@ -196,12 +196,18 @@ class LCodeGen BASE_EMBEDDED {
                                int argc,
                                LInstruction* instr);
 
+  enum RDIState {
+    RDI_UNINITIALIZED,
+    RDI_CONTAINS_TARGET
+  };
+
   // Generate a direct call to a known function. Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind);
+                         CallKind call_kind,
+                         RDIState rdi_state);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -225,7 +231,8 @@ class LCodeGen BASE_EMBEDDED {
       LOperand* elements_pointer,
       LOperand* key,
      ElementsKind elements_kind,
-      uint32_t offset);
+      uint32_t offset,
+      uint32_t additional_index = 0);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
index 95c3a8e..3ba0cae 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-x64.cc
@@ -110,22 +110,17 @@ void LInstruction::PrintTo(StringStream* stream) {
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -727,22 +722,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -755,8 +734,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -774,12 +755,6 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1144,7 +1119,7 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new(zone()) LGlobalObject(instr->qml_global()));
+  return DefineAsRegister(new(zone()) LGlobalObject);
 }
 
 
@@ -1214,7 +1189,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
 }
 
 
@@ -1285,6 +1260,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* input = UseRegisterAtStart(instr->value());
   LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
@@ -1309,6 +1285,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -2241,9 +2223,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
instruction_pending_deoptimization_environment_-> - set_deoptimization_environment(result->environment()); - ClearInstructionPendingDeoptimizationEnvironment(); + SetDeferredLazyDeoptimizationEnvironment(result->environment()); + instruction_pending_deoptimization_environment_ = NULL; + pending_deoptimization_ast_id_ = AstNode::kNoNumber; return result; } @@ -2270,8 +2255,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { undefined, instr->call_kind(), instr->is_construct()); - if (instr->arguments() != NULL) { - inner->Bind(instr->arguments(), graph()->GetArgumentsObject()); + if (instr->arguments_var() != NULL) { + inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } current_block_->UpdateEnvironment(inner); chunk_->AddInlinedClosure(instr->closure()); @@ -2280,10 +2265,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { + LInstruction* pop = NULL; + + HEnvironment* env = current_block_->last_environment(); + + if (instr->arguments_pushed()) { + int argument_count = env->arguments_environment()->parameter_count(); + pop = new(zone()) LDrop(argument_count); + argument_count_ -= argument_count; + } + HEnvironment* outer = current_block_->last_environment()-> DiscardInlined(false); current_block_->UpdateEnvironment(outer); - return NULL; + + return pop; } diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h index 390eb49..642a0a0 100644 --- a/src/3rdparty/v8/src/x64/lithium-x64.h +++ b/src/3rdparty/v8/src/x64/lithium-x64.h @@ -179,7 +179,8 @@ class LCodeGen; V(CheckMapValue) \ V(LoadFieldByIndex) \ V(DateField) \ - V(WrapReceiver) + V(WrapReceiver) \ + V(Drop) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -203,16 +204,15 @@ class LInstruction: public ZoneObject { LInstruction() : environment_(NULL), hydrogen_value_(NULL), - is_call_(false), - is_save_doubles_(false) { } + is_call_(false) { } virtual ~LInstruction() { } virtual void CompileToNative(LCodeGen* generator) = 0; virtual const char* Mnemonic() const = 0; virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream) = 0; - virtual void PrintOutputOperandTo(StringStream* stream) = 0; + virtual void PrintDataTo(StringStream* stream); + virtual void PrintOutputOperandTo(StringStream* stream); enum Opcode { // Declare a unique enum value for each instruction. @@ -247,22 +247,12 @@ class LInstruction: public ZoneObject { void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } - void set_deoptimization_environment(LEnvironment* env) { - deoptimization_environment_.set(env); - } - LEnvironment* deoptimization_environment() const { - return deoptimization_environment_.get(); - } - bool HasDeoptimizationEnvironment() const { - return deoptimization_environment_.is_set(); - } - void MarkAsCall() { is_call_ = true; } - void MarkAsSaveDoubles() { is_save_doubles_ = true; } + + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } // Interface to the register allocator and iterators. 
bool IsMarkedAsCall() const { return is_call_; } - bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; } virtual bool HasResult() const = 0; virtual LOperand* result() = 0; @@ -283,9 +273,7 @@ class LInstruction: public ZoneObject { LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; HValue* hydrogen_value_; - SetOncePointer<LEnvironment> deoptimization_environment_; bool is_call_; - bool is_save_doubles_; }; @@ -307,9 +295,6 @@ class LTemplateInstruction: public LInstruction { int TempCount() { return T; } LOperand* TempAt(int i) { return temps_[i]; } - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - protected: EmbeddedContainer<LOperand*, R> results_; EmbeddedContainer<LOperand*, I> inputs_; @@ -535,9 +520,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { public: - LArgumentsElements() { } - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") + DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) }; @@ -831,6 +815,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) Handle<JSFunction> function() const { return hydrogen()->function(); } + LEnvironment* GetDeferredLazyDeoptimizationEnvironment() { + return lazy_deopt_env_; + } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { + lazy_deopt_env_ = env; + } + + private: + LEnvironment* lazy_deopt_env_; }; @@ -1206,6 +1199,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1222,13 +1216,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1242,6 +1236,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1358,6 +1353,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { }; +class LDrop: public LTemplateInstruction<0, 0, 0> { + public: + explicit LDrop(int count) : count_(count) { } + + int count() const { return count_; } + + DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") + + private: + int count_; +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1392,13 +1400,7 @@ class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> { class LGlobalObject: public LTemplateInstruction<1, 0, 0> { public: - explicit LGlobalObject(bool qml_global) : qml_global_(qml_global) {} - DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") - - bool qml_global() { return qml_global_; } - private: - bool qml_global_; }; @@ -1440,6 +1442,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void 
PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } + Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1491,16 +1494,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> { DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global") DECLARE_HYDROGEN_ACCESSOR(CallGlobal) - explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {} - virtual void PrintDataTo(StringStream* stream); Handle<String> name() const { return hydrogen()->name(); } int arity() const { return hydrogen()->argument_count() - 1; } - - bool qml_global() { return qml_global_; } - private: - bool qml_global_; }; @@ -1697,6 +1694,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1719,6 +1717,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + + bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1742,6 +1743,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -2347,11 +2349,6 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - LInstruction* MarkAsSaveDoubles(LInstruction* instr); - - LInstruction* SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id); - void ClearInstructionPendingDeoptimizationEnvironment(); LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, int* argument_index_accumulator); diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc index 12e653c..3d380a2 100644 --- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc +++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc @@ -150,6 +150,20 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) { } +void MacroAssembler::PushAddress(ExternalReference source) { + int64_t address = reinterpret_cast<int64_t>(source.address()); + if (is_int32(address) && !Serializer::enabled()) { + if (emit_debug_code()) { + movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + } + push(Immediate(static_cast<int32_t>(address))); + return; + } + LoadAddress(kScratchRegister, source); + push(kScratchRegister); +} + + void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { ASSERT(root_array_available_); movq(destination, Operand(kRootRegister, @@ -4174,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) { CodePatcher::CodePatcher(byte* address, int size) : address_(address), size_(size), - masm_(Isolate::Current(), address, size + Assembler::kGap) { + masm_(NULL, address, size + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap in order for the assembler to generate size bytes of instructions without failing with buffer size constraints.
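The new MacroAssembler::PushAddress above chooses between two encodings: when the external reference's address fits in a sign-extended 32-bit immediate (and the serializer is off, so raw addresses may be baked into code), a single push of an Immediate suffices; otherwise the full 64-bit address is loaded into kScratchRegister and pushed from there. A minimal standalone sketch of the fits-in-int32 test, in plain C++ and independent of the V8 assembler API (the helper name and sample addresses are invented for illustration):

#include <cstdint>
#include <cstdio>

// Models V8's is_int32(): true when the 64-bit address survives a
// round-trip through a sign-extended 32-bit value, i.e. when the x64
// 'push imm32' form (which sign-extends its operand) can encode it.
static bool FitsInInt32(int64_t address) {
  return address == static_cast<int64_t>(static_cast<int32_t>(address));
}

int main() {
  const int64_t kLow  = 0x00000000004004d0;  // short path: push imm32
  const int64_t kHigh = 0x00007f3a12345678;  // long path: movq scratch, imm64; push scratch
  std::printf("low fits int32:  %d\n", FitsInInt32(kLow));   // prints 1
  std::printf("high fits int32: %d\n", FitsInInt32(kHigh));  // prints 0
  return 0;
}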
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h index f7b36c5..66587d5 100644 --- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h +++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.h @@ -127,6 +127,8 @@ class MacroAssembler: public Assembler { // Returns the size of the code generated by LoadAddress. // Used by CallSize(ExternalReference) to find the size of a call. int LoadAddressSize(ExternalReference source); + // Pushes the address of the external reference onto the stack. + void PushAddress(ExternalReference source); // Operations on roots in the root-array. void LoadRoot(Register destination, Heap::RootListIndex index); @@ -1443,11 +1445,6 @@ inline Operand GlobalObjectOperand() { } -static inline Operand QmlGlobalObjectOperand() { - return ContextOperand(rsi, Context::QML_GLOBAL_INDEX); -} - - // Provides access to exit frame stack space (not GCed). inline Operand StackSpaceOperand(int index) { #ifdef _WIN64 diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc index 9dfcf7a..5721e9b 100644 --- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc +++ b/src/3rdparty/v8/src/x64/stub-cache-x64.cc @@ -379,6 +379,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(receiver); __ push(holder); __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset)); + __ PushAddress(ExternalReference::isolate_address()); } @@ -393,7 +394,7 @@ static void CompileCallLoadPropertyWithInterceptor( ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), masm->isolate()); - __ Set(rax, 5); + __ Set(rax, 6); __ LoadAddress(rbx, ref); CEntryStub stub(1); @@ -402,7 +403,7 @@ static void CompileCallLoadPropertyWithInterceptor( // Number of pointers to be reserved on stack for fast API call. -static const int kFastApiCallArguments = 3; +static const int kFastApiCallArguments = 4; // Reserves space for the extra arguments to API function in the @@ -452,10 +453,11 @@ static void GenerateFastApiCall(MacroAssembler* masm, // -- rsp[16] : api function // (first fast api call extra argument) // -- rsp[24] : api call data - // -- rsp[32] : last argument + // -- rsp[32] : isolate + // -- rsp[40] : last argument // -- ... - // -- rsp[(argc + 3) * 8] : first argument - // -- rsp[(argc + 4) * 8] : receiver + // -- rsp[(argc + 4) * 8] : first argument + // -- rsp[(argc + 5) * 8] : receiver // ----------------------------------- // Get the function and setup the context. Handle<JSFunction> function = optimization.constant_function(); @@ -473,9 +475,11 @@ static void GenerateFastApiCall(MacroAssembler* masm, } else { __ Move(Operand(rsp, 3 * kPointerSize), call_data); } + __ movq(kScratchRegister, ExternalReference::isolate_address()); + __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister); // Prepare arguments. - __ lea(rbx, Operand(rsp, 3 * kPointerSize)); + __ lea(rbx, Operand(rsp, 4 * kPointerSize)); #if defined(__MINGW64__) Register arguments_arg = rcx; @@ -665,7 +669,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { __ CallExternalReference( ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), masm->isolate()), - 5); + 6); // Restore the name_ register. __ pop(name_); @@ -732,8 +736,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Register scratch, Label* miss_label) { // Check that the map of the object hasn't changed. + CompareMapMode mode = transition.is_null() ? 
ALLOW_ELEMENT_TRANSITION_MAPS + : REQUIRE_EXACT_MAP; __ CheckMap(receiver_reg, Handle<Map>(object->map()), - miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + miss_label, DO_SMI_CHECK, mode); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -1005,6 +1011,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } else { __ Push(Handle<Object>(callback->data())); } + __ PushAddress(ExternalReference::isolate_address()); // isolate __ push(name_reg); // name // Save a pointer to where we pushed the arguments pointer. // This will be passed as the const AccessorInfo& to the C++ callback. @@ -1025,14 +1032,14 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, __ movq(name_arg, rsp); __ push(scratch2); // Restore return address. - // 3 elements array for v8::Arguments::values_ and handler for name. - const int kStackSpace = 4; + // 4 elements array for v8::Arguments::values_ and handler for name. + const int kStackSpace = 5; // Allocate v8::AccessorInfo in non-GCed stack space. const int kArgStackSpace = 1; __ PrepareCallApiFunction(kArgStackSpace); - __ lea(rax, Operand(name_arg, 3 * kPointerSize)); + __ lea(rax, Operand(name_arg, 4 * kPointerSize)); // v8::AccessorInfo::args_. __ movq(StackSpaceOperand(0), rax); @@ -1107,13 +1114,20 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, name, miss); ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); + // Preserve the receiver register explicitly whenever it is different from + // the holder and it is needed should the interceptor return without any + // result. The CALLBACKS case needs the receiver to be passed into C++ code, + // the FIELD case might cause a miss during the prototype check. + bool must_perform_prototype_check = *interceptor_holder != lookup->holder(); + bool must_preserve_receiver_reg = !receiver.is(holder_reg) && + (lookup->type() == CALLBACKS || must_perform_prototype_check); + // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. { FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. + if (must_preserve_receiver_reg) { __ push(receiver); } __ push(holder_reg); @@ -1139,7 +1153,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, __ bind(&interceptor_failed); __ pop(name_reg); __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + if (must_preserve_receiver_reg) { __ pop(receiver); } @@ -1148,7 +1162,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register.
- if (*interceptor_holder != lookup->holder()) { + if (must_perform_prototype_check) { holder_reg = CheckPrototypes(interceptor_holder, holder_reg, Handle<JSObject>(lookup->holder()), @@ -1182,6 +1196,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, __ push(holder_reg); __ Move(holder_reg, callback); __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset)); + __ PushAddress(ExternalReference::isolate_address()); __ push(holder_reg); __ push(name_reg); __ push(scratch2); // restore return address @@ -1189,7 +1204,7 @@ ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadCallbackProperty), isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } else { // !compile_followup_inline // Call the runtime system to load the interceptor. @@ -1204,7 +1219,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, ExternalReference ref = ExternalReference( IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } @@ -1996,7 +2011,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( name, depth, &miss); // Move the return address on top of the stack. - __ movq(rax, Operand(rsp, 3 * kPointerSize)); + __ movq(rax, Operand(rsp, 4 * kPointerSize)); __ movq(Operand(rsp, 0 * kPointerSize), rax); GenerateFastApiCall(masm(), optimization, argc); @@ -3117,6 +3132,32 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( __ jmp(miss_ic, RelocInfo::CODE_TARGET); } + +static void GenerateSmiKeyCheck(MacroAssembler* masm, + Register key, + Register scratch, + XMMRegister xmm_scratch0, + XMMRegister xmm_scratch1, + Label* fail) { + // Check that key is a smi or a heap number containing a smi and branch + // if the check fails. + Label key_ok; + __ JumpIfSmi(key, &key_ok); + __ CheckMap(key, + masm->isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset)); + __ cvttsd2si(scratch, xmm_scratch0); + __ cvtlsi2sd(xmm_scratch1, scratch); + __ ucomisd(xmm_scratch1, xmm_scratch0); + __ j(not_equal, fail); + __ j(parity_even, fail); // NaN. + __ Integer32ToSmi(key, scratch); + __ bind(&key_ok); +} + + void KeyedLoadStubCompiler::GenerateLoadExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -3130,8 +3171,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rax, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); // Check that the index is in range. __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -3265,8 +3306,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rcx, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic); // Check that the index is in range.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -3367,30 +3408,28 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } else { // Perform float-to-int conversion with truncation (round-to-zero) // behavior. + // Fast path: use machine instruction to convert to int64. If that + // fails (out-of-range), go into the runtime. + __ cvttsd2siq(r8, xmm0); + __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000)); + __ cmpq(r8, kScratchRegister); + __ j(equal, &slow); - // Convert to int32 and store the low byte/word. - // If the value is NaN or +/-infinity, the result is 0x80000000, - // which is automatically zero when taken mod 2^n, n < 32. // rdx: value (converted to an untagged integer) // rdi: untagged index // rbx: base pointer of external storage switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ cvttsd2si(rdx, xmm0); - __ movb(Operand(rbx, rdi, times_1, 0), rdx); + __ movb(Operand(rbx, rdi, times_1, 0), r8); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ cvttsd2si(rdx, xmm0); - __ movw(Operand(rbx, rdi, times_2, 0), rdx); + __ movw(Operand(rbx, rdi, times_2, 0), r8); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - // Convert to int64, so that NaN and infinities become - // 0x8000000000000000, which is zero mod 2^32. - __ cvttsd2siq(rdx, xmm0); - __ movl(Operand(rbx, rdi, times_4, 0), rdx); + __ movl(Operand(rbx, rdi, times_4, 0), r8); break; case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS: @@ -3447,8 +3486,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rax, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); // Get the elements array. __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -3489,8 +3528,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rax, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); // Get the elements array. __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -3545,8 +3584,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rcx, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic); if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { __ JumpIfNotSmi(rax, &transition_elements_kind); @@ -3687,8 +3726,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(rcx, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. 
+ GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic); // Get the elements array. __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -3770,6 +3809,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Increment the length of the array. __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1)); + __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&finish_store); __ bind(&check_capacity);
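The GenerateSmiKeyCheck helper added in this file accepts a key that is either a smi already or a heap number whose double value survives the cvttsd2si/cvtlsi2sd round-trip unchanged; ucomisd raises the parity flag for NaN, which the parity_even branch sends to the miss handler. A rough C++ model of that predicate, offered as a sketch only (the function name is invented, and the explicit range guard stands in for the hardware's out-of-range cvttsd2si result):

#include <cstdint>
#include <cstdio>
#include <limits>

// Models the double -> int32 -> double round-trip that GenerateSmiKeyCheck
// performs with cvttsd2si / cvtlsi2sd / ucomisd. The key is usable only if
// truncation loses nothing; NaN, fractional, and out-of-range values must
// fall through to the miss handler instead.
static bool KeyConvertsToSmi(double key, int32_t* out) {
  if (key != key) return false;                   // NaN: the parity_even path
  if (key < -2147483648.0 || key > 2147483647.0) return false;  // outside int32
  int32_t truncated = static_cast<int32_t>(key);  // models cvttsd2si
  if (static_cast<double>(truncated) != key) return false;  // ucomisd mismatch
  *out = truncated;
  return true;
}

int main() {
  int32_t smi = 0;
  std::printf("2.0 -> %d\n", KeyConvertsToSmi(2.0, &smi));  // 1, smi == 2
  std::printf("2.5 -> %d\n", KeyConvertsToSmi(2.5, &smi));  // 0
  std::printf("NaN -> %d\n",
              KeyConvertsToSmi(std::numeric_limits<double>::quiet_NaN(), &smi));  // 0
  return 0;
}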