Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64.h               |    3
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc               |    9
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc             |   41
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                |    4
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                  |   15
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                 |    2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           |   20
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                     |   36
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc        |  218
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h         |    3
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                |    5
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                 |    9
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc        |   90
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h         |    9
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc |  173
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h  |   25
-rw-r--r--  deps/v8/src/x64/simulator-x64.h               |    8
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc             |   41
18 files changed, 251 insertions, 460 deletions
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 9f5f85029..60b29e647 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -629,8 +629,7 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
-  static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
   // ---------------------------------------------------------------------------
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 0af0a4347..4e037ff46 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -977,7 +977,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
 
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -1076,8 +1076,7 @@ static void AllocateJSArray(MacroAssembler* masm,
                             Register scratch,
                             bool fill_with_hole,
                             Label* gc_required) {
-  __ LoadInitialArrayMap(array_function, scratch,
-                         elements_array, fill_with_hole);
+  __ LoadInitialArrayMap(array_function, scratch, elements_array);
   if (FLAG_debug_code) {  // Assert that array size is not zero.
     __ testq(array_size, array_size);
@@ -1304,10 +1303,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ jmp(call_generic_code);
 
   __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
   // rbx: JSArray
   __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          r11,
                                          kScratchRegister,
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 61d6c8791..d179d2a5d 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -2864,37 +2864,30 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(counters->regexp_entry_native(), 1);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  static const int kRegExpExecuteArguments = 9;
+  static const int kRegExpExecuteArguments = 8;
   int argument_slots_on_stack =
       masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
   __ EnterApiExitFrame(argument_slots_on_stack);
 
-  // Argument 9: Pass current isolate address.
+  // Argument 8: Pass current isolate address.
   // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
   //     Immediate(ExternalReference::isolate_address()));
   __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
   __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
           kScratchRegister);
 
-  // Argument 8: Indicate that this is a direct call from JavaScript.
+  // Argument 7: Indicate that this is a direct call from JavaScript.
   __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
           Immediate(1));
 
-  // Argument 7: Start (high end) of backtracking stack memory area.
+  // Argument 6: Start (high end) of backtracking stack memory area.
   __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
   __ movq(r9, Operand(kScratchRegister, 0));
   __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
   __ addq(r9, Operand(kScratchRegister, 0));
-  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
-
-  // Argument 6: Set the number of capture registers to zero to force global
-  // regexps to behave as non-global. This does not affect non-global regexps.
-  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
+  // Argument 6 passed in r9 on Linux and on the stack on Windows.
 #ifdef _WIN64
-  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
-          Immediate(0));
-#else
-  __ Set(r9, 0);
+  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
 #endif
 
   // Argument 5: static offsets vector buffer.
@@ -2902,7 +2895,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
       ExternalReference::address_of_static_offsets_vector(isolate));
   // Argument 5 passed in r8 on Linux and on the stack on Windows.
 #ifdef _WIN64
-  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
 #endif
 
   // First four arguments are passed in registers on both Linux and Windows.
@@ -2967,9 +2960,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
   Label success;
   Label exception;
-  __ cmpl(rax, Immediate(1));
-  // We expect exactly one result since we force the called regexp to behave
-  // as non-global.
+  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
   __ j(equal, &success, Label::kNear);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
   __ j(equal, &exception);
@@ -6002,12 +5993,12 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
   { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
-  // and ElementsTransitionGenerator::GenerateSmiToDouble
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
   { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateSmiToDouble
+  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateDoubleToObject
@@ -6281,9 +6272,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
 
   __ CheckFastElements(rdi, &double_elements);
 
-  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
   __ JumpIfSmi(rax, &smi_element);
-  __ CheckFastSmiElements(rdi, &fast_elements);
+  __ CheckFastSmiOnlyElements(rdi, &fast_elements);
 
   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -6301,7 +6292,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   // place.
   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
 
-  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ SmiToInteger32(kScratchRegister, rcx);
   __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -6315,8 +6306,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
                  OMIT_SMI_CHECK);
   __ ret(0);
 
-  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
-  // FAST_*_ELEMENTS, and value is Smi.
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
   __ bind(&smi_element);
   __ SmiToInteger32(kScratchRegister, rcx);
   __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2924810c1..a8d39b25f 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -220,7 +220,7 @@ ModuloFunction CreateModuloFunction() {
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
@@ -241,7 +241,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
 }
 
 
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- rax    : value
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 1b29e58d5..eec83d9d1 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,8 +91,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
 
 #define __ ACCESS_MASM(masm)
 
@@ -105,12 +103,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
-      __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
-    }
-    __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
-
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as as two smis causing it to be untouched by GC.
@@ -165,11 +157,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       }
     }
 
-    // Read current padding counter and skip corresponding number of words.
-    __ pop(kScratchRegister);
-    __ SmiToInteger32(kScratchRegister, kScratchRegister);
-    __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
     // Get rid of the internal frame.
   }
 
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 073815358..7ed81b47e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1684,7 +1684,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
         default:
           UNREACHABLE();
       }
-      AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
+      AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
                      operand_size_code(),
                      value);
       break;
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 0db7424eb..974269e5d 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -659,7 +659,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
                                Label* fall_through) {
   ToBooleanStub stub(result_register());
   __ push(result_register());
-  __ CallStub(&stub, condition->test_id());
+  __ CallStub(&stub);
   __ testq(result_register(), result_register());
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
@@ -1659,8 +1659,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(constant_elements_kind);
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1671,7 +1670,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Heap* heap = isolate()->heap();
   if (has_constant_fast_elements &&
       constant_elements_values->map() == heap->fixed_cow_array_map()) {
-    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
     FastCloneShallowArrayStub stub(
@@ -1683,9 +1682,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
           FLAG_smi_only_arrays);
-    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1713,9 +1713,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (IsFastObjectElementsKind(constant_elements_kind)) {
-      // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
-      // cannot transition and don't need to call the runtime stub.
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
+      // transition and don't need to call the runtime stub.
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
       __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
@@ -2287,7 +2287,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   CallFunctionStub stub(arg_count, flags);
   __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub, expr->id());
+  __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 82fdb3cec..0632ce439 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -769,25 +769,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &non_double_value);
 
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
   // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          rbx,
                                          rdi,
                                          &slow);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
 
   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          rbx,
                                          rdi,
                                          &slow);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
 
@@ -1642,7 +1642,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in eax.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
     __ movq(rax, rdx);
     __ Ret();
     __ bind(&fail);
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+    PatchInlinedSmiCode(address());
   }
 }
 
 
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,18 +1766,14 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
             address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. Enabling means switching from a short
-  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
-  // reverse operation of that.
+  // Patch with a short conditional jump. There must be a
+  // short jump-if-carry/not-carry at this position.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
-         ? (*jmp_address == Assembler::kJncShortOpcode ||
-            *jmp_address == Assembler::kJcShortOpcode)
-         : (*jmp_address == Assembler::kJnzShortOpcode ||
-            *jmp_address == Assembler::kJzShortOpcode));
-  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
-      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
-      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+         *jmp_address == Assembler::kJcShortOpcode);
+  Condition cc = *jmp_address == Assembler::kJncShortOpcode
+      ? not_zero
+      : zero;
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f1c631bc5..85e7ac0bf 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -2223,35 +2223,41 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
-  bool need_generic = instr->hydrogen()->need_generic();
-
-  if (map_count == 0 && !need_generic) {
-    DeoptimizeIf(no_condition, instr->environment());
-    return;
-  }
   Handle<String> name = instr->hydrogen()->name();
-  Label done;
-  for (int i = 0; i < map_count; ++i) {
-    bool last = (i == map_count - 1);
-    Handle<Map> map = instr->hydrogen()->types()->at(i);
-    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (last && !need_generic) {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    } else {
+
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ Move(rcx, instr->hydrogen()->name());
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
       Label next;
+      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
+      __ bind(&generic);
+      __ Move(rcx, instr->hydrogen()->name());
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
+    __ bind(&done);
   }
-  if (need_generic) {
-    __ Move(rcx, name);
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  }
-  __ bind(&done);
 }
@@ -2324,10 +2330,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
     __ and_(temp, Immediate(Map::kElementsKindMask));
     __ shr(temp, Immediate(Map::kElementsKindShift));
-    __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
-    __ j(less, &fail, Label::kNear);
-    __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
-    __ j(less_equal, &ok, Label::kNear);
+    __ cmpl(temp, Immediate(FAST_ELEMENTS));
+    __ j(equal, &ok, Label::kNear);
     __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
     __ j(less, &fail, Label::kNear);
     __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2371,20 +2375,11 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
 
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   Register result = ToRegister(instr->result());
 
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits.
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
-  }
-
   // Load the result.
   __ movq(result,
-          BuildFastArrayOperand(instr->elements(),
-                                instr->key(),
+          BuildFastArrayOperand(instr->elements(), instr->key(),
                                 FAST_ELEMENTS,
-                                FixedArray::kHeaderSize - kHeapObjectTag,
-                                instr->additional_index()));
+                                FixedArray::kHeaderSize - kHeapObjectTag));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2398,32 +2393,19 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
-  }
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(),
-        instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset,
-        instr->additional_index());
-    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
-
-  Operand double_load_operand = BuildFastArrayOperand(
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
       instr->elements(),
       instr->key(),
       FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
-      instr->additional_index());
+      offset);
+  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ movsd(result, double_load_operand);
 }
@@ -2432,8 +2414,7 @@ Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
     ElementsKind elements_kind,
-    uint32_t offset,
-    uint32_t additional_index) {
+    uint32_t offset) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
   int shift_size = ElementsKindToShiftSize(elements_kind);
   if (key->IsConstantOperand()) {
@@ -2442,14 +2423,11 @@ Operand LCodeGen::BuildFastArrayOperand(
       Abort("array index constant value too big");
     }
     return Operand(elements_pointer_reg,
-                   ((constant_value + additional_index) << shift_size)
-                       + offset);
+                   constant_value * (1 << shift_size) + offset);
   } else {
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg,
-                   ToRegister(key),
-                   scale_factor,
-                   offset + (additional_index << shift_size));
+    return Operand(elements_pointer_reg, ToRegister(key),
+                   scale_factor, offset);
   }
 }
@@ -2458,17 +2436,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(),
-                                        elements_kind,
-                                        0,
-                                        instr->additional_index()));
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
-  }
-
+                                        instr->key(), elements_kind, 0));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
     __ movss(result, operand);
@@ -2505,11 +2473,8 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3373,18 +3338,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(),
-                                        elements_kind,
-                                        0,
-                                        instr->additional_index()));
-
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
-  }
-
+                                        instr->key(), elements_kind, 0));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister value(ToDoubleRegister(instr->value()));
     __ cvtsd2ss(value, value);
@@ -3410,11 +3364,8 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
       case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3457,29 +3408,30 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
-  Operand operand =
-      BuildFastArrayOperand(instr->object(),
-                            instr->key(),
-                            FAST_ELEMENTS,
-                            FixedArray::kHeaderSize - kHeapObjectTag,
-                            instr->additional_index());
-
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(FieldOperand(elements, offset), value);
+  } else {
+    __ movq(FieldOperand(elements,
+                         key,
+                         times_pointer_size,
+                         FixedArray::kHeaderSize),
+            value);
   }
 
-  __ movq(operand, value);
-
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ lea(key, operand);
+    __ lea(key, FieldOperand(elements,
+                             key,
+                             times_pointer_size,
+                             FixedArray::kHeaderSize));
     __ RecordWrite(elements,
                    key,
                    value,
@@ -3508,19 +3460,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
 
   Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
-      instr->additional_index());
-
-  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
-    // Sign extend key because it could be a 32 bit negative value
-    // and the dehoisted address computation happens in 64 bits
-    Register key_reg = ToRegister(instr->key());
-    __ movsxlq(key_reg, key_reg);
-  }
-
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ movsd(double_store_operand, value);
 }
@@ -3549,22 +3490,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   __ j(not_equal, &not_applicable);
   __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
     __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
     // Write barrier.
     ASSERT_NE(instr->temp_reg(), NULL);
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         ToRegister(instr->temp_reg()), kDontSaveFPRegs);
-  } else if (IsFastSmiElementsKind(from_kind) &&
-             IsFastDoubleElementsKind(to_kind)) {
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+             to_kind == FAST_DOUBLE_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(rdx));
     ASSERT(new_map_reg.is(rbx));
     __ movq(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
-  } else if (IsFastDoubleElementsKind(from_kind) &&
-             IsFastObjectElementsKind(to_kind)) {
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(rdx));
     ASSERT(new_map_reg.is(rbx));
@@ -4238,9 +4178,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
 
   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
     // Load the map's "bit field 2".
@@ -4386,11 +4325,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate()->GetElementsKind();
 
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // Deopt if the literal boilerplate ElementsKind is of a type different than
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
     __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
     __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
     // Load the map's "bit field 2".
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 73e1a9b96..1331fba55 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -231,8 +231,7 @@ class LCodeGen BASE_EMBEDDED {
       LOperand* elements_pointer,
      LOperand* key,
       ElementsKind elements_kind,
-      uint32_t offset,
-      uint32_t additional_index = 0);
+      uint32_t offset);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 6094dbb94..3ba0cae7a 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2012,9 +2012,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  ElementsKind from_kind = instr->original_map()->elements_kind();
-  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
     LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LOperand* temp_reg = TempRegister();
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 642a0a003..9083c1f8d 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -1199,7 +1199,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1216,13 +1215,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1236,7 +1235,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1694,7 +1692,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1719,7 +1716,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* value() { return inputs_[2]; }
 
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1743,7 +1739,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 95b43f42a..53becf6c3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2658,12 +2658,10 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
+       Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
@@ -2671,26 +2669,23 @@ void MacroAssembler::CheckFastElements(Register map,
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
   j(below_equal, fail, distance);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
+       Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
 
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Label* fail,
-                                          Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
   j(above, fail, distance);
 }
@@ -2754,18 +2749,24 @@ void MacroAssembler::CompareMap(Register obj,
                                 CompareMapMode mode) {
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    ElementsKind kind = map->elements_kind();
-    if (IsFastElementsKind(kind)) {
-      bool packed = IsFastPackedElementsKind(kind);
-      Map* current_map = *map;
-      while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-        kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-        current_map = current_map->LookupElementsTransitionMap(kind, NULL);
-        if (!current_map) break;
-        j(equal, early_success, Label::kNear);
-        Cmp(FieldOperand(obj, HeapObject::kMapOffset),
-            Handle<Map>(current_map));
-      }
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_fast_element_map));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_double_map));
     }
   }
 }
@@ -4056,38 +4057,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
   movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check that the function's map is the same as the expected cached map.
-  movq(scratch, Operand(scratch,
-                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
-  int offset = expected_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  cmpq(map_in_out, FieldOperand(scratch, offset));
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
   j(not_equal, no_map_match);
 
   // Use the transitioned cached map.
-  offset = transitioned_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  movq(map_in_out, FieldOperand(scratch, offset));
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
 }
 
 
 void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch,
-    Register map_out, bool can_have_holes) {
+    Register function_in, Register scratch, Register map_out) {
   ASSERT(!function_in.is(map_out));
   Label done;
   movq(map_out, FieldOperand(function_in,
                              JSFunction::kPrototypeOrInitialMapOffset));
   if (!FLAG_smi_only_arrays) {
-    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        kind,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  } else if (can_have_holes) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        FAST_HOLEY_SMI_ELEMENTS,
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
                                         map_out,
                                         scratch,
                                         &done);
@@ -4198,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(NULL, address, size + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 1c1cd95e9..66587d531 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -877,9 +877,9 @@ class MacroAssembler: public Assembler {
 
   // Check if a map for a JSObject indicates that the object has fast smi only
   // elements. Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Label* fail,
-                            Label::Distance distance = Label::kFar);
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
 
   // Check to see if maybe_number can be stored as a double in
   // FastDoubleElements. If it can, store it at the index specified by index in
@@ -1141,8 +1141,7 @@ class MacroAssembler: public Assembler {
   // Load the initial map for new Arrays from a JSFunction.
   void LoadInitialArrayMap(Register function_in,
                            Register scratch,
-                           Register map_out,
-                           bool can_have_holes);
+                           Register map_out);
 
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index cb1e02931..bf232bff9 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,23 +44,21 @@ namespace internal {
 
 /*
  * This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
- *         using LoadCurrentCharacter before using any of the dispatch methods.
- *         Temporarily stores the index of capture start after a matching pass
- *         for a global regexp.
- * - rdi : Current position in input, as negative offset from end of string.
+ * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
+ *         LoadCurrentCharacter before using any of the dispatch methods.
+ * - rdi : current position in input, as negative offset from end of string.
  *         Please notice that this is the byte offset, not the character
- *         offset! Is always a 32-bit signed (negative) offset, but must be
+ *         offset!  Is always a 32-bit signed (negative) offset, but must be
  *         maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : End of input (points to byte after last character in input),
+ * - rsi : end of input (points to byte after last character in input),
  *         so that rsi+rdi points to the current character.
- * - rbp : Frame pointer. Used to access arguments, local variables and
+ * - rbp : frame pointer. Used to access arguments, local variables and
  *         RegExp registers.
- * - rsp : Points to tip of C stack.
- * - rcx : Points to tip of backtrack stack. The backtrack stack contains
- *         only 32-bit values. Most are offsets from some base (e.g., character
+ * - rsp : points to tip of C stack.
+ * - rcx : points to tip of backtrack stack.  The backtrack stack contains
+ *         only 32-bit values.  Most are offsets from some base (e.g., character
  *         positions from end of string or code location from Code* pointer).
- * - r8  : Code object pointer. Used to convert between absolute and
+ * - r8  : code object pointer. Used to convert between absolute and
  *         code-object-relative addresses.
 *
 * The registers rax, rbx, r9 and r11 are free to use for computations.
@@ -74,22 +72,20 @@ namespace internal {
 *
 * The stack will have the following content, in some order, indexable from the
 * frame pointer (see, e.g., kStackHighEnd):
- *    - Isolate* isolate     (address of the current isolate)
+ *    - Isolate* isolate     (Address of the current isolate)
 *    - direct_call          (if 1, direct call from JavaScript code, if 0 call
 *                            through the runtime system)
- *    - stack_area_base      (high end of the memory area to use as
+ *    - stack_area_base      (High end of the memory area to use as
 *                            backtracking stack)
- *    - capture array size   (may fit multiple sets of matches)
 *    - int* capture_array   (int[num_saved_registers_], for output).
- *    - end of input          (address of end of string)
- *    - start of input        (address of first character in string)
+ *    - end of input          (Address of end of string)
+ *    - start of input        (Address of first character in string)
 *    - start index           (character index of start)
 *    - String* input_string  (input string)
 *    - return address
 *    - backup of callee save registers (rbx, possibly rsi and rdi).
- *    - success counter       (only useful for global regexp to count matches)
 *    - Offset of location before start of input (effectively character
- *      position -1). Used to initialize capture registers to a non-position.
+ *      position -1).  Used to initialize capture registers to a non-position.
 *    - At start of string (if 1, we are starting at the start of the
 *      string, otherwise 0)
 *    - register 0  rbp[-n]   (Only positions must be stored in the first
@@ -98,7 +94,7 @@
 *
 * The first num_saved_registers_ registers are initialized to point to
 * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out uninitialized.
+ * character of the string).  The remaining registers starts out uninitialized.
 *
 * The first seven values must be provided by the calling code by
 * calling the code's entry address cast to a function pointer with the
@@ -748,16 +744,13 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
 
 
 void RegExpMacroAssemblerX64::Fail() {
-  STATIC_ASSERT(FAILURE == 0);  // Return value for failure is zero.
-  if (!global()) {
-    __ Set(rax, FAILURE);
-  }
+  ASSERT(FAILURE == 0);  // Return value for failure is zero.
+  __ Set(rax, 0);
   __ jmp(&exit_label_);
 }
 
 
 Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
-  Label return_rax;
   // Finalize code - write the entry point code now we know how many
   // registers we need.
   // Entry code:
@@ -791,7 +784,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   ASSERT_EQ(kInputStart, -3 * kPointerSize);
   ASSERT_EQ(kInputEnd, -4 * kPointerSize);
   ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
-  ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
+  ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
   __ push(rdi);
   __ push(rsi);
   __ push(rdx);
@@ -802,8 +795,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   __ push(rbx);  // Callee-save
 #endif
 
-  __ push(Immediate(0));  // Number of successful matches in a global regexp.
-  __ push(Immediate(0));  // Make room for "input start - 1" constant.
+  __ push(Immediate(0));  // Make room for "at start" constant.
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -823,14 +815,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ Set(rax, EXCEPTION);
-  __ jmp(&return_rax);
+  __ jmp(&exit_label_);
 
   __ bind(&stack_limit_hit);
   __ Move(code_object_pointer(), masm_.CodeObject());
   CallCheckStackGuardState();  // Preserves no registers beside rbp and rsp.
   __ testq(rax, rax);
   // If returned value is non-zero, we exit with the returned value as result.
-  __ j(not_zero, &return_rax);
+  __ j(not_zero, &exit_label_);
 
   __ bind(&stack_ok);
 
@@ -855,7 +847,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   // position registers.
   __ movq(Operand(rbp, kInputStartMinusOne), rax);
 
-#ifdef WIN32
+  if (num_saved_registers_ > 0) {
+    // Fill saved registers with initial value = start offset - 1
+    // Fill in stack push order, to avoid accessing across an unwritten
+    // page (a problem on Windows).
+    __ Set(rcx, kRegisterZero);
+    Label init_loop;
+    __ bind(&init_loop);
+    __ movq(Operand(rbp, rcx, times_1, 0), rax);
+    __ subq(rcx, Immediate(kPointerSize));
+    __ cmpq(rcx,
+            Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
+    __ j(greater, &init_loop);
+  }
   // Ensure that we have written to each stack page, in order. Skipping a page
   // on Windows can cause segmentation faults. Assuming page size is 4k.
   const int kPageSize = 4096;
@@ -865,50 +869,22 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
        i += kRegistersPerPage) {
     __ movq(register_location(i), rax);  // One write every page.
   }
-#endif  // WIN32
 
+  // Initialize backtrack stack pointer.
+  __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
   // Initialize code object pointer.
   __ Move(code_object_pointer(), masm_.CodeObject());
-
-  Label load_char_start_regexp, start_regexp;
-  // Load newline if index is at start, previous character otherwise.
-  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
-  __ j(not_equal, &load_char_start_regexp, Label::kNear);
+  // Load previous char as initial value of current-character.
+  Label at_start;
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ j(equal, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+  __ jmp(&start_label_);
+  __ bind(&at_start);
   __ Set(current_character(), '\n');
-  __ jmp(&start_regexp, Label::kNear);
-
-  // Global regexp restarts matching here.
-  __ bind(&load_char_start_regexp);
-  // Load previous char as initial value of current character register.
-  LoadCurrentCharacterUnchecked(-1, 1);
-  __ bind(&start_regexp);
-
-  // Initialize on-stack registers.
-  if (num_saved_registers_ > 0) {
-    // Fill saved registers with initial value = start offset - 1
-    // Fill in stack push order, to avoid accessing across an unwritten
-    // page (a problem on Windows).
-    if (num_saved_registers_ > 8) {
-      __ Set(rcx, kRegisterZero);
-      Label init_loop;
-      __ bind(&init_loop);
-      __ movq(Operand(rbp, rcx, times_1, 0), rax);
-      __ subq(rcx, Immediate(kPointerSize));
-      __ cmpq(rcx,
-              Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
-      __ j(greater, &init_loop);
-    } else {  // Unroll the loop.
-      for (int i = 0; i < num_saved_registers_; i++) {
-        __ movq(register_location(i), rax);
-      }
-    }
-  }
-
-  // Initialize backtrack stack pointer.
-  __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
   __ jmp(&start_label_);
+
   // Exit code:
   if (success_label_.is_linked()) {
     // Save captures when successful.
@@ -926,10 +902,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     }
     for (int i = 0; i < num_saved_registers_; i++) {
       __ movq(rax, register_location(i));
-      if (i == 0 && global()) {
-        // Keep capture start in rdx for the zero-length check later.
-        __ movq(rdx, rax);
-      }
       __ addq(rax, rcx);  // Convert to index from start, not end.
       if (mode_ == UC16) {
         __ sar(rax, Immediate(1));  // Convert byte index to character index.
@@ -937,54 +909,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
       __ movl(Operand(rbx, i * kIntSize), rax);
     }
   }
-
-  if (global()) {
-    // Restart matching if the regular expression is flagged as global.
-    // Increment success counter.
-    __ incq(Operand(rbp, kSuccessfulCaptures));
-    // Capture results have been stored, so the number of remaining global
-    // output registers is reduced by the number of stored captures.
-    __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
-    __ subq(rcx, Immediate(num_saved_registers_));
-    // Check whether we have enough room for another set of capture results.
-    __ cmpq(rcx, Immediate(num_saved_registers_));
-    __ j(less, &exit_label_);
-
-    __ movq(Operand(rbp, kNumOutputRegisters), rcx);
-    // Advance the location for output.
-    __ addq(Operand(rbp, kRegisterOutput),
-            Immediate(num_saved_registers_ * kIntSize));
-
-    // Prepare rax to initialize registers with its value in the next run.
-    __ movq(rax, Operand(rbp, kInputStartMinusOne));
-
-    // Special case for zero-length matches.
-    // rdx: capture start index
-    __ cmpq(rdi, rdx);
-    // Not a zero-length match, restart.
-    __ j(not_equal, &load_char_start_regexp);
-    // rdi (offset from the end) is zero if we already reached the end.
-    __ testq(rdi, rdi);
-    __ j(zero, &exit_label_, Label::kNear);
-    // Advance current position after a zero-length match.
-    if (mode_ == UC16) {
-      __ addq(rdi, Immediate(2));
-    } else {
-      __ incq(rdi);
-    }
-    __ jmp(&load_char_start_regexp);
-  } else {
-    __ movq(rax, Immediate(SUCCESS));
-  }
+    __ Set(rax, SUCCESS);
   }
 
+  // Exit and return rax
   __ bind(&exit_label_);
-  if (global()) {
-    // Return the number of successful captures.
-    __ movq(rax, Operand(rbp, kSuccessfulCaptures));
-  }
-
-  __ bind(&return_rax);
 #ifdef _WIN64
   // Restore callee save registers.
   __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
@@ -1021,7 +951,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   __ testq(rax, rax);
   // If returning non-zero, we should end execution with the given
   // result as return value.
-  __ j(not_zero, &return_rax);
+  __ j(not_zero, &exit_label_);
 
   // Restore registers.
   __ Move(code_object_pointer(), masm_.CodeObject());
@@ -1082,7 +1012,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
     __ Set(rax, EXCEPTION);
-    __ jmp(&return_rax);
+    __ jmp(&exit_label_);
   }
 
   FixupCodeRelativePositions();
@@ -1205,9 +1135,8 @@ void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
 }
 
 
-bool RegExpMacroAssemblerX64::Succeed() {
+void RegExpMacroAssemblerX64::Succeed() {
   __ jmp(&success_label_);
-  return global();
 }
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 31fc8efd9..cd24b6098 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -109,7 +109,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
   virtual void ReadStackPointerFromRegister(int reg);
   virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
-  virtual bool Succeed();
+  virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
@@ -154,12 +154,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
   static const int kInputStart = kStartIndex + kPointerSize;
   static const int kInputEnd = kInputStart + kPointerSize;
   static const int kRegisterOutput = kInputEnd + kPointerSize;
-  // For the case of global regular expression, we have room to store at least
-  // one set of capture results. For the case of non-global regexp, we ignore
-  // this value. NumOutputRegisters is passed as 32-bit value. The upper
-  // 32 bit of this 64-bit stack slot may contain garbage.
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   // DirectCall is passed as 32 bit int (values 0 or 1).
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
@@ -172,12 +167,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
   static const int kInputStart = kStartIndex - kPointerSize;
   static const int kInputEnd = kInputStart - kPointerSize;
   static const int kRegisterOutput = kInputEnd - kPointerSize;
-  // For the case of global regular expression, we have room to store at least
-  // one set of capture results. For the case of non-global regexp, we ignore
-  // this value.
-  static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
-  static const int kStackHighEnd = kFrameAlign;
-  static const int kDirectCall = kStackHighEnd + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+  static const int kDirectCall = kFrameAlign;
   static const int kIsolate = kDirectCall + kPointerSize;
 #endif
 
@@ -192,14 +183,14 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
   // AMD64 Calling Convention has only one callee-save register that
   // we use. We push this after the frame pointer (and after the
   // parameters).
-  static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
+  static const int kBackup_rbx = kStackHighEnd - kPointerSize;
   static const int kLastCalleeSaveRegister = kBackup_rbx;
 #endif
 
-  static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
-  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  static const int kInputStartMinusOne =
+      kLastCalleeSaveRegister - kPointerSize;
 
   // First register address. Following registers are below it on the stack.
   static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 8aba70181..df8423a65 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,12 +40,12 @@ namespace internal {
   (entry(p0, p1, p2, p3, p4))
 
 typedef int (*regexp_matcher)(String*, int, const byte*,
-                              const byte*, int*, int, Address, int, Isolate*);
+                              const byte*, int*, Address, int, Isolate*);
 
 // Call the generated regexp code directly. The code at the entry address should
 // expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+  (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   (reinterpret_cast<TryCatch*>(try_catch_address))
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 1b8ed3848..5721e9b37 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1434,32 +1434,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
         __ jmp(&fast_object);
         // In case of fast smi-only, convert to fast object, otherwise bail out.
         __ bind(&not_fast_object);
-        __ CheckFastSmiElements(rbx, &call_builtin);
+        __ CheckFastSmiOnlyElements(rbx, &call_builtin);
         // rdx: receiver
         // rbx: map
-
-        Label try_holey_map;
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+        __ movq(r9, rdi);  // Backup rdi as it is going to be trashed.
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                                FAST_ELEMENTS,
                                                rbx,
                                                rdi,
-                                               &try_holey_map);
-
-        ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm());
-        // Restore edi.
-        __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-        __ jmp(&fast_object);
-
-        __ bind(&try_holey_map);
-        __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
-                                               FAST_HOLEY_ELEMENTS,
-                                               rbx,
-                                               rdi,
                                                &call_builtin);
-        ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm());
-        __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
+        __ movq(rdi, r9);
         __ bind(&fast_object);
       } else {
         __ CheckFastObjectElements(rbx, &call_builtin);
@@ -3384,11 +3369,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
         break;
       case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3453,11 +3435,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3608,7 +3587,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // Check that the key is a smi or a heap number convertible to a smi.
   GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
 
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(rax, &transition_elements_kind);
   }
 
@@ -3632,13 +3611,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   __ j(not_equal, &miss_force_generic);
 
   __ bind(&finish_store);
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ SmiToInteger32(rcx, rcx);
     __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
             rax);
   } else {
     // Do the store and update the write barrier.
-    ASSERT(IsFastObjectElementsKind(elements_kind));
+    ASSERT(elements_kind == FAST_ELEMENTS);
     __ SmiToInteger32(rcx, rcx);
     __ lea(rcx,
            FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));