Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                |   9
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc              | 146
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                 |   4
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                   |   4
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc            | 293
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                      |  61
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                 | 210
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                  | 116
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc         | 548
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h          |  41
-rw-r--r--  deps/v8/src/arm/lithium-gap-resolver-arm.cc    |   4
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc         | 127
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h          |  27
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc  | 244
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h   |  24
-rw-r--r--  deps/v8/src/arm/simulator-arm.h                |  12
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc              | 342
17 files changed, 1577 insertions(+), 635 deletions(-)
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index c99e778a7f..578bd810d4 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, Label* gc_required) { const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm, bool fill_with_hole, Label* gc_required) { // Load the initial map from the array function. - __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage); + __ LoadInitialArrayMap(array_function, scratch2, + elements_array_storage, fill_with_hole); if (FLAG_debug_code) { // Assert that array size is not zero. __ tst(array_size, array_size); @@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm, __ b(call_generic_code); __ bind(¬_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. // r3: JSArray __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r2, r9, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index f772db9be2..761123f639 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -3737,9 +3737,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Compute the return address in lr to return to after the jump below. Pc is // already at '+ 8' from the current instruction but return is after three // instructions so add another 4 to pc to get the return address. - masm->add(lr, pc, Operand(4)); - __ str(lr, MemOperand(sp, 0)); - masm->Jump(r5); + { + // Prevent literal pool emission before return address. + Assembler::BlockConstPoolScope block_const_pool(masm); + masm->add(lr, pc, Operand(4)); + __ str(lr, MemOperand(sp, 0)); + masm->Jump(r5); + } if (always_allocate) { // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 @@ -3956,14 +3960,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Jump to a faked try block that does the invoke, with a faked catch // block that sets the pending exception. __ jmp(&invoke); - __ bind(&handler_entry); - handler_offset_ = handler_entry.pos(); - // Caught exception: Store result (exception) in the pending exception - // field in the JSEnv and return a failure sentinel. Coming in here the - // fp will be invalid because the PushTryHandler below sets it to 0 to - // signal the existence of the JSEntry frame. - __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + + // Block literal pool emission whilst taking the position of the handler + // entry. This avoids making the assumption that literal pools are always + // emitted after an instruction is emitted, rather than before. + { + Assembler::BlockConstPoolScope block_const_pool(masm); + __ bind(&handler_entry); + handler_offset_ = handler_entry.pos(); + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. 
Coming in here the + // fp will be invalid because the PushTryHandler below sets it to 0 to + // signal the existence of the JSEntry frame. + __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + } __ str(r0, MemOperand(ip)); __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); __ b(&exit); @@ -4006,9 +4017,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Branch and link to JSEntryTrampoline. We don't use the double underscore // macro for the add instruction because we don't want the coverage tool - // inserting instructions here after we read the pc. - __ mov(lr, Operand(pc)); - masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + // inserting instructions here after we read the pc. We block literal pool + // emission for the same reason. + { + Assembler::BlockConstPoolScope block_const_pool(masm); + __ mov(lr, Operand(pc)); + masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + } // Unlink this frame from the handler chain. __ PopTryHandler(); @@ -4824,27 +4839,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); // Isolates: note we add an additional parameter here (isolate pointer). - const int kRegExpExecuteArguments = 8; + const int kRegExpExecuteArguments = 9; const int kParameterRegisters = 4; __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); // Stack pointer now points to cell where return address is to be written. // Arguments are before that on the stack or in registers. - // Argument 8 (sp[16]): Pass current isolate address. + // Argument 9 (sp[20]): Pass current isolate address. __ mov(r0, Operand(ExternalReference::isolate_address())); - __ str(r0, MemOperand(sp, 4 * kPointerSize)); + __ str(r0, MemOperand(sp, 5 * kPointerSize)); - // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. + // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 3 * kPointerSize)); + __ str(r0, MemOperand(sp, 4 * kPointerSize)); - // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. + // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(r0, Operand(0)); __ str(r0, MemOperand(sp, 2 * kPointerSize)); // Argument 5 (sp[4]): static offsets vector buffer. @@ -4893,7 +4913,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ cmp(r0, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. __ b(eq, &success); Label failure; __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); @@ -5169,9 +5191,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); __ b(ne, &call); // Patch the receiver on the stack with the global receiver object. 
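Note on the return-address computation in the CEntryStub hunk above: on ARM, reading pc yields the address of the current instruction plus 8, so adding a further 4 lands exactly on the instruction after the three-instruction add/str/jump sequence. This is also why literal-pool emission is blocked there: a constant pool emitted between those instructions would change the distance. A minimal sketch of the arithmetic, using a hypothetical instruction address:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kInstrSize = 4;       // ARM instructions are 4 bytes
      const uint32_t kAddAddr = 0x1000;    // hypothetical address of "add lr, pc, #4"
      uint32_t pc_as_read = kAddAddr + 8;  // ARM pipeline: pc reads as "+ 8"
      uint32_t lr = pc_as_read + 4;        // the extra 4 the stub adds
      // Return point is three instructions past the add: add, str, jump.
      assert(lr == kAddAddr + 3 * kInstrSize);
      return 0;
    }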
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); - __ str(r2, MemOperand(sp, argc_ * kPointerSize)); + __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); + __ str(r3, MemOperand(sp, argc_ * kPointerSize)); __ bind(&call); } @@ -5179,9 +5201,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // r1: pushed function (to be verified) __ JumpIfSmi(r1, &non_function); // Get the map of the function object. - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); __ b(ne, &slow); + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + } + // Fast-case: Invoke the function now. // r1: pushed function ParameterCount actual(argc_); @@ -5205,8 +5231,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Slow-case: Non-function called. __ bind(&slow); + if (RecordCallTarget()) { + // If there is a call target cache, mark it megamorphic in the + // non-function case. MegamorphicSentinel is an immortal immovable + // object (undefined) so no write barrier is needed. + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + } // Check for function proxy. - __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); __ b(ne, &non_function); __ push(r1); // put proxy as additional argument __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE)); @@ -5873,36 +5908,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: result string length __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); __ cmp(r2, Operand(r4, ASR, 1)); + // Return original string. __ b(eq, &return_r0); + // Longer than original string's length or negative: unsafe arguments. + __ b(hi, &runtime); + // Shorter than original string's length: an actual substring. - Label result_longer_than_two; - // Check for special case of two character ASCII string, in which case - // we do a lookup in the symbol table first. - __ cmp(r2, Operand(2)); - __ b(gt, &result_longer_than_two); - __ b(lt, &runtime); - - __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime); - - // Get the two characters forming the sub string. - __ add(r0, r0, Operand(r3)); - __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); - __ jmp(&return_r0); - - // r2: result string length. - // r3: two characters combined into halfword in little endian byte order. - __ bind(&make_two_character_string); - __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); - __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ jmp(&return_r0); - - __ bind(&result_longer_than_two); // Deal with different string types: update the index if necessary // and put the underlying string into r5. 
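Note on the SubStringStub bounds check above: after the cmp against the untagged original length, the hi branch is an unsigned comparison, so a single test rejects both over-long and negative substring lengths. A sketch of the trick, with a hypothetical helper name:

    #include <cassert>
    #include <cstdint>

    // Equal means "whole string" (handled by the eq branch); unsigned-higher
    // means "too long or negative"; anything below is a genuine substring.
    bool IsUnsafeSubstringLength(int32_t requested, int32_t original) {
      return static_cast<uint32_t>(requested) > static_cast<uint32_t>(original);
    }

    int main() {
      assert(!IsUnsafeSubstringLength(3, 8));   // real substring
      assert(!IsUnsafeSubstringLength(8, 8));   // whole string: eq branch case
      assert(IsUnsafeSubstringLength(9, 8));    // longer than the original
      assert(IsUnsafeSubstringLength(-1, 8));   // negative wraps to 0xFFFFFFFF
      return 0;
    }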
// r0: original string @@ -6816,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), RelocInfo::CODE_TARGET)); + + // Prevent literal pool emission during calculation of return address. + Assembler::BlockConstPoolScope block_const_pool(masm); + // Push return address (accessible to GC through exit frame pc). // Note that using pc with str is deprecated. Label start; @@ -7106,8 +7121,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, @@ -7176,8 +7191,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { // forth between a compare instructions (a nop in this position) and the // real branch when we start and stop incremental heap marking. // See RecordWriteStub::Patch for details. - __ b(&skip_to_incremental_noncompacting); - __ b(&skip_to_incremental_compacting); + { + // Block literal pool emission, as the position of these two instructions + // is assumed by the patching code. + Assembler::BlockConstPoolScope block_const_pool(masm); + __ b(&skip_to_incremental_noncompacting); + __ b(&skip_to_incremental_compacting); + } if (remembered_set_action_ == EMIT_REMEMBERED_SET) { __ RememberedSetHelper(object_, @@ -7370,9 +7390,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { Label fast_elements; __ CheckFastElements(r2, r5, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS __ JumpIfSmi(r0, &smi_element); - __ CheckFastSmiOnlyElements(r2, r5, &fast_elements); + __ CheckFastSmiElements(r2, r5, &fast_elements); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -7384,7 +7404,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Push(r5, r4); __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); @@ -7395,8 +7415,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. 
__ bind(&smi_element); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index befd8f2de7..e00afb9035 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : value @@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- r0 : value diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 96139a2597..3e7a1e9d0e 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -125,6 +125,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { Assembler::kDebugBreakSlotInstructions); } +const bool Debug::FramePaddingLayout::kIsSupported = false; + #define __ ACCESS_MASM(masm) diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 69b12ce5ee..ff7c3c139e 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED { Assembler::BlockConstPoolScope block_const_pool(masm_); __ bind(&patch_site_); __ cmp(reg, Operand(reg)); - // Don't use b(al, ...) as that might emit the constant pool right after the - // branch. After patching when the branch is no longer unconditional - // execution can continue into the constant pool. __ b(eq, target); // Always taken before patched. } @@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED { } void EmitPatchInfo() { + // Block literal pool emission whilst recording patch site information. + Assembler::BlockConstPoolScope block_const_pool(masm_); if (patch_site_.is_bound()) { int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); Register reg; @@ -112,13 +111,6 @@ class JumpPatchSite BASE_EMBEDDED { }; -// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove. -int FullCodeGenerator::self_optimization_header_size() { - UNREACHABLE(); - return 24; -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. The actual // argument count matches the formal parameter count expected by the @@ -275,11 +267,11 @@ void FullCodeGenerator::Generate() { // For named function expressions, declare the function name as a // constant. 
if (scope()->is_function_scope() && scope()->function() != NULL) { - VariableProxy* proxy = scope()->function(); - ASSERT(proxy->var()->mode() == CONST || - proxy->var()->mode() == CONST_HARMONY); - ASSERT(proxy->var()->location() != Variable::UNALLOCATED); - EmitDeclaration(proxy, proxy->var()->mode(), NULL); + VariableDeclaration* function = scope()->function(); + ASSERT(function->proxy()->var()->mode() == CONST || + function->proxy()->var()->mode() == CONST_HARMONY); + ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); } @@ -351,6 +343,8 @@ static const int kBackEdgeDistanceDivisor = 142; void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, Label* back_edge_target) { Comment cmnt(masm_, "[ Stack check"); + // Block literal pools whilst emitting stack check code. + Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; if (FLAG_count_based_interrupts) { @@ -789,62 +783,52 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, } -void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - VariableMode mode, - FunctionLiteral* function) { +void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { + // The variable in the declaration always resides in the current function + // context. + ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + if (FLAG_debug_code) { + // Check that we're not inside a with or catch context. + __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); + __ CompareRoot(r1, Heap::kWithContextMapRootIndex); + __ Check(ne, "Declaration in with context."); + __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); + __ Check(ne, "Declaration in catch context."); + } +} + + +void FullCodeGenerator::VisitVariableDeclaration( + VariableDeclaration* declaration) { // If it was not possible to allocate the variable at compile time, we // need to "declare" it at runtime to make sure it actually exists in the // local context. + VariableProxy* proxy = declaration->proxy(); + VariableMode mode = declaration->mode(); Variable* variable = proxy->var(); - bool binding_needs_init = (function == NULL) && - (mode == CONST || mode == CONST_HARMONY || mode == LET); + bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET; switch (variable->location()) { case Variable::UNALLOCATED: - ++global_count_; + globals_->Add(variable->name(), zone()); + globals_->Add(variable->binding_needs_init() + ? isolate()->factory()->the_hole_value() + : isolate()->factory()->undefined_value(), + zone()); break; case Variable::PARAMETER: case Variable::LOCAL: - if (function != NULL) { - Comment cmnt(masm_, "[ Declaration"); - VisitForAccumulatorValue(function); - __ str(result_register(), StackOperand(variable)); - } else if (binding_needs_init) { - Comment cmnt(masm_, "[ Declaration"); + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, StackOperand(variable)); } break; case Variable::CONTEXT: - // The variable in the decl always resides in the current function - // context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); - if (FLAG_debug_code) { - // Check that we're not inside a with or catch context. 
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); - __ CompareRoot(r1, Heap::kWithContextMapRootIndex); - __ Check(ne, "Declaration in with context."); - __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); - __ Check(ne, "Declaration in catch context."); - } - if (function != NULL) { - Comment cmnt(masm_, "[ Declaration"); - VisitForAccumulatorValue(function); - __ str(result_register(), ContextOperand(cp, variable->index())); - int offset = Context::SlotOffset(variable->index()); - // We know that we have written a function, which is not a smi. - __ RecordWriteContextSlot(cp, - offset, - result_register(), - r2, - kLRHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (binding_needs_init) { - Comment cmnt(masm_, "[ Declaration"); + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + EmitDebugCheckDeclarationContext(variable); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, ContextOperand(cp, variable->index())); // No write barrier since the_hole_value is in old space. @@ -853,13 +837,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, break; case Variable::LOOKUP: { - Comment cmnt(masm_, "[ Declaration"); + Comment cmnt(masm_, "[ VariableDeclaration"); __ mov(r2, Operand(variable->name())); // Declaration nodes are always introduced in one of four modes. - ASSERT(mode == VAR || - mode == CONST || - mode == CONST_HARMONY || - mode == LET); + ASSERT(mode == VAR || mode == LET || + mode == CONST || mode == CONST_HARMONY); PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE; __ mov(r1, Operand(Smi::FromInt(attr))); @@ -867,11 +849,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, // Note: For variables we must not push an initial value (such as // 'undefined') because we may have a (legal) redeclaration and we // must not destroy the current value. - if (function != NULL) { - __ Push(cp, r2, r1); - // Push initial value for function declaration. - VisitForStackValue(function); - } else if (binding_needs_init) { + if (hole_init) { __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); __ Push(cp, r2, r1, r0); } else { @@ -885,6 +863,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, } +void FullCodeGenerator::VisitFunctionDeclaration( + FunctionDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + globals_->Add(variable->name(), zone()); + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(declaration->fun(), script()); + // Check for stack-overflow exception. + if (function.is_null()) return SetStackOverflow(); + globals_->Add(function, zone()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + VisitForAccumulatorValue(declaration->fun()); + __ str(result_register(), StackOperand(variable)); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + EmitDebugCheckDeclarationContext(variable); + VisitForAccumulatorValue(declaration->fun()); + __ str(result_register(), ContextOperand(cp, variable->index())); + int offset = Context::SlotOffset(variable->index()); + // We know that we have written a function, which is not a smi. 
+ __ RecordWriteContextSlot(cp, + offset, + result_register(), + r2, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + __ mov(r2, Operand(variable->name())); + __ mov(r1, Operand(Smi::FromInt(NONE))); + __ Push(cp, r2, r1); + // Push initial value for function declaration. + VisitForStackValue(declaration->fun()); + __ CallRuntime(Runtime::kDeclareContextSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + Handle<JSModule> instance = declaration->module()->interface()->Instance(); + ASSERT(!instance.is_null()); + + switch (variable->location()) { + case Variable::UNALLOCATED: { + Comment cmnt(masm_, "[ ModuleDeclaration"); + globals_->Add(variable->name(), zone()); + globals_->Add(instance, zone()); + Visit(declaration->module()); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); + __ mov(r1, Operand(instance)); + __ str(r1, ContextOperand(cp, variable->index())); + Visit(declaration->module()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: + // TODO(rossberg) + break; + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ImportDeclaration"); + EmitDebugCheckDeclarationContext(variable); + // TODO(rossberg) + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { + // TODO(rossberg) +} + + void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { // Call the runtime to declare the globals. // The context is the first argument. @@ -1511,7 +1605,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { // Mark all computed expressions that are bound to a key that // is shadowed by a later occurrence of the same key. For the // marked expressions, no store code is emitted. 
- expr->CalculateEmitStore(); + expr->CalculateEmitStore(zone()); AccessorTable accessor_table(isolate()->zone()); for (int i = 0; i < expr->properties()->length(); i++) { @@ -1609,7 +1703,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value()); - bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind); Handle<FixedArrayBase> constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1630,8 +1724,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); FastCloneShallowArrayStub::Mode mode = has_fast_elements ? FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1659,7 +1752,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(constant_elements_kind)) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ ldr(r6, MemOperand(sp)); // Copy of array literal. __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); @@ -2271,6 +2364,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { } // Record source position for debugger. SetSourcePosition(expr->position()); + + // Record call targets in unoptimized code, but not in the snapshot. 
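Note on the SetCC changes in EmitFastAsciiArrayJoin above: without SetCC the add would leave the flags untouched, so the following b(vs, &bailout) would test stale state; with it, the pair forms an overflow-checked signed addition. A portable sketch, assuming GCC/Clang's __builtin_add_overflow:

    #include <cassert>
    #include <cstdint>

    // add(..., SetCC) followed by b(vs, &bailout) is an overflow-checked add.
    bool AccumulateLength(int32_t* total, int32_t piece) {
      int32_t sum;
      if (__builtin_add_overflow(*total, piece, &sum)) return false;  // "b vs"
      *total = sum;
      return true;
    }

    int main() {
      int32_t total = 0x7FFFFFF0;
      assert(AccumulateLength(&total, 8));     // 0x7FFFFFF8: still fits
      assert(!AccumulateLength(&total, 100));  // would overflow: bail out
      return 0;
    }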
+ if (!Serializer::enabled()) { + flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET); + Handle<Object> uninitialized = + TypeFeedbackCells::UninitializedSentinel(isolate()); + Handle<JSGlobalPropertyCell> cell = + isolate()->factory()->NewJSGlobalPropertyCell(uninitialized); + RecordTypeFeedbackCell(expr->id(), cell); + __ mov(r2, Operand(cell)); + } + CallFunctionStub stub(arg_count, flags); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); @@ -3564,7 +3669,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); - __ add(string_length, string_length, Operand(scratch1)); + __ add(string_length, string_length, Operand(scratch1), SetCC); __ b(vs, &bailout); __ cmp(element, elements_end); __ b(lt, &loop); @@ -3601,7 +3706,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ b(ne, &bailout); __ tst(scratch2, Operand(0x80000000)); __ b(ne, &bailout); - __ add(string_length, string_length, Operand(scratch2)); + __ add(string_length, string_length, Operand(scratch2), SetCC); __ b(vs, &bailout); __ SmiUntag(string_length); @@ -4357,7 +4462,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) { void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { Scope* declaration_scope = scope()->DeclarationScope(); - if (declaration_scope->is_global_scope()) { + if (declaration_scope->is_global_scope() || + declaration_scope->is_module_scope()) { // Contexts nested in the global context have a canonical empty function // as their closure, not the anonymous closure containing the global // code. Pass a smi sentinel and let the runtime look up the empty @@ -4388,14 +4494,55 @@ void FullCodeGenerator::EnterFinallyBlock() { ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTag == 0); __ add(r1, r1, Operand(r1)); // Convert to smi. + + // Store result register while executing finally block. + __ push(r1); + + // Store pending message while executing finally block. + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(ip, Operand(pending_message_obj)); + __ ldr(r1, MemOperand(ip)); + __ push(r1); + + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(ip, Operand(has_pending_message)); + __ ldr(r1, MemOperand(ip)); + __ push(r1); + + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(ip, Operand(pending_message_script)); + __ ldr(r1, MemOperand(ip)); __ push(r1); } void FullCodeGenerator::ExitFinallyBlock() { ASSERT(!result_register().is(r1)); + // Restore pending message from stack. 
+ __ pop(r1); + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(ip, Operand(pending_message_script)); + __ str(r1, MemOperand(ip)); + + __ pop(r1); + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(ip, Operand(has_pending_message)); + __ str(r1, MemOperand(ip)); + + __ pop(r1); + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(ip, Operand(pending_message_obj)); + __ str(r1, MemOperand(ip)); + // Restore result register from stack. __ pop(r1); + // Uncook return address and return. __ pop(result_register()); ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index c88c257092..fd93480986 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in r0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ mov(r0, r2); __ Ret(); __ bind(&fail); @@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); __ b(ne, &non_double_value); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> + // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { // Activate inlined smi code. 
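Note on the cooking in EnterFinallyBlock/ExitFinallyBlock above: with kSmiTag == 0 and a single tag bit, "add r1, r1, r1" is smi tagging (a left shift by one), and the same zero tag bit is what "tst rx, #kSmiTagMask" tests in the inlined smi-check patching later in this diff. A sketch assuming that 32-bit smi layout:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTag = 0;
    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    intptr_t SmiTag(int32_t value) {            // "add r1, r1, r1"
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }
    int32_t SmiUntag(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }
    bool IsSmi(intptr_t v) { return (v & kSmiTagMask) == kSmiTag; }

    int main() {
      intptr_t cooked = SmiTag(1234);    // cooked return offset
      assert(IsSmi(cooked));             // what "tst rx, #kSmiTagMask" checks
      assert(SmiUntag(cooked) == 1234);  // uncooking restores the offset
      return 0;
    }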
if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address()); + PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); } } -void PatchInlinedSmiCode(Address address) { +void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { Address cmp_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) { Instr instr_at_patch = Assembler::instr_at(patch_address); Instr branch_instr = Assembler::instr_at(patch_address + Instruction::kInstrSize); - ASSERT(Assembler::IsCmpRegister(instr_at_patch)); - ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), - Assembler::GetRm(instr_at_patch).code()); + // This is patching a conditional "jump if not smi/jump if smi" site. + // Enabling by changing from + // cmp rx, rx + // b eq/ne, <target> + // to + // tst rx, #kSmiTagMask + // b ne/eq, <target> + // and vice-versa to be disabled again. + CodePatcher patcher(patch_address, 2); + Register reg = Assembler::GetRn(instr_at_patch); + if (check == ENABLE_INLINED_SMI_CHECK) { + ASSERT(Assembler::IsCmpRegister(instr_at_patch)); + ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), + Assembler::GetRm(instr_at_patch).code()); + patcher.masm()->tst(reg, Operand(kSmiTagMask)); + } else { + ASSERT(check == DISABLE_INLINED_SMI_CHECK); + ASSERT(Assembler::IsTstImmediate(instr_at_patch)); + patcher.masm()->cmp(reg, reg); + } ASSERT(Assembler::IsBranch(branch_instr)); if (Assembler::GetCondition(branch_instr) == eq) { - // This is patching a "jump if not smi" site to be active. - // Changing - // cmp rx, rx - // b eq, <target> - // to - // tst rx, #kSmiTagMask - // b ne, <target> - CodePatcher patcher(patch_address, 2); - Register reg = Assembler::GetRn(instr_at_patch); - patcher.masm()->tst(reg, Operand(kSmiTagMask)); patcher.EmitCondition(ne); } else { ASSERT(Assembler::GetCondition(branch_instr) == ne); - // This is patching a "jump if smi" site to be active. 
- // Changing - // cmp rx, rx - // b ne, <target> - // to - // tst rx, #kSmiTagMask - // b eq, <target> - CodePatcher patcher(patch_address, 2); - Register reg = Assembler::GetRn(instr_at_patch); - patcher.masm()->tst(reg, Operand(kSmiTagMask)); patcher.EmitCondition(eq); } } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index c3dd1cbaa2..283862c787 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) { } -template<int R, int I, int T> -void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) { +void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); - for (int i = 0; i < inputs_.length(); i++) { + for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - inputs_[i]->PrintTo(stream); + InputAt(i)->PrintTo(stream); } } -template<int R, int I, int T> -void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) { - for (int i = 0; i < results_.length(); i++) { - if (i > 0) stream->Add(" "); - results_[i]->PrintTo(stream); - } +void LInstruction::PrintOutputOperandTo(StringStream* stream) { + if (HasResult()) result()->PrintTo(stream); } @@ -416,9 +411,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), info_(info), graph_(graph), - instructions_(32), - pointer_maps_(8), - inlined_closures_(1) { + instructions_(32, graph->zone()), + pointer_maps_(8, graph->zone()), + inlined_closures_(1, graph->zone()) { } @@ -432,9 +427,9 @@ int LChunk::GetNextSpillIndex(bool is_double) { LOperand* LChunk::GetNextSpillSlot(bool is_double) { int index = GetNextSpillIndex(is_double); if (is_double) { - return LDoubleStackSlot::Create(index); + return LDoubleStackSlot::Create(index, zone()); } else { - return LStackSlot::Create(index); + return LStackSlot::Create(index, zone()); } } @@ -479,23 +474,23 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block); int index = -1; if (instr->IsControl()) { - instructions_.Add(gap); + instructions_.Add(gap, zone()); index = instructions_.length(); - instructions_.Add(instr); + instructions_.Add(instr, zone()); } else { index = instructions_.length(); - instructions_.Add(instr); - instructions_.Add(gap); + instructions_.Add(instr, zone()); + instructions_.Add(gap, zone()); } if (instr->HasPointerMap()) { - pointer_maps_.Add(instr->pointer_map()); + pointer_maps_.Add(instr->pointer_map(), zone()); instr->pointer_map()->set_lithium_position(index); } } LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) { - return LConstantOperand::Create(constant->id()); + return LConstantOperand::Create(constant->id(), zone()); } @@ -534,7 +529,8 @@ int LChunk::NearestGapPos(int index) const { void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) { - GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to); + GetGapAt(index)->GetOrCreateParallelMove( + LGap::START, zone())->AddMove(from, to, zone()); } @@ -732,22 +728,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { } -LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id) { - ASSERT(instruction_pending_deoptimization_environment_ == NULL); - ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber); - instruction_pending_deoptimization_environment_ = instr; - pending_deoptimization_ast_id_ = ast_id; - return 
instr; -} - - -void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() { - instruction_pending_deoptimization_environment_ = NULL; - pending_deoptimization_ast_id_ = AstNode::kNoNumber; -} - - LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { @@ -760,8 +740,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, if (hinstr->HasObservableSideEffects()) { ASSERT(hinstr->next()->IsSimulate()); HSimulate* sim = HSimulate::cast(hinstr->next()); - instr = SetInstructionPendingDeoptimizationEnvironment( - instr, sim->ast_id()); + ASSERT(instruction_pending_deoptimization_environment_ == NULL); + ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber); + instruction_pending_deoptimization_environment_ = instr; + pending_deoptimization_ast_id_ = sim->ast_id(); } // If instruction does not have side-effects lazy deoptimization @@ -779,15 +761,9 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, } -LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) { - instr->MarkAsSaveDoubles(); - return instr; -} - - LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(position_)); + instr->set_pointer_map(new(zone()) LPointerMap(position_, zone())); return instr; } @@ -1010,7 +986,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment( hydrogen_env->parameter_count(), argument_count_, value_count, - outer); + outer, + zone()); int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1295,6 +1272,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) { ASSERT(instr->value()->representation().IsInteger32()); ASSERT(instr->representation().IsInteger32()); + if (instr->HasNoUses()) return NULL; LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LBitNotI(value)); } @@ -1319,6 +1297,76 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { } +bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) { + uint32_t divisor_abs = abs(divisor); + // Dividing by 0, 1, and powers of 2 is easy. + // Note that IsPowerOf2(0) returns true; + ASSERT(IsPowerOf2(0) == true); + if (IsPowerOf2(divisor_abs)) return true; + + // We have magic numbers for a few specific divisors. + // Details and proofs can be found in: + // - Hacker's Delight, Henry S. Warren, Jr. + // - The PowerPC Compiler Writer’s Guide + // and probably many others. + // + // We handle + // <divisor with magic numbers> * <power of 2> + // but not + // <divisor with magic numbers> * <other divisor with magic numbers> + int32_t power_of_2_factor = + CompilerIntrinsics::CountTrailingZeros(divisor_abs); + DivMagicNumbers magic_numbers = + DivMagicNumberFor(divisor_abs >> power_of_2_factor); + if (magic_numbers.M != InvalidDivMagicNumber.M) return true; + + return false; +} + + +HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) { + // A value with an integer representation does not need to be transformed. + if (dividend->representation().IsInteger32()) { + return dividend; + // A change from an integer32 can be replaced by the integer32 value. 
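Note on HasMagicNumberForDivisor above: the "magic numbers" are the multiply-high reciprocals from Hacker's Delight. A worked sketch for divisor 3, using its published constant M = 0x55555556 (shift 0); the emitted code in this diff follows the same shape (smull, optional add, asr, plus an "LSR #31" sign correction):

    #include <cassert>
    #include <cstdint>

    // q = n / 3 via multiply-high with the Hacker's Delight constant for 3.
    int32_t DivideBy3(int32_t n) {
      int64_t product = static_cast<int64_t>(0x55555556) * n;  // smull
      int32_t hi = static_cast<int32_t>(product >> 32);        // multiply-high
      return hi + (static_cast<uint32_t>(n) >> 31);            // fix-up for n < 0
    }

    int main() {
      for (int32_t n : {7, 6, 0, -1, -7, 2147483647, -2147483647}) {
        assert(DivideBy3(n) == n / 3);
      }
      return 0;
    }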
+ } else if (dividend->IsChange() && + HChange::cast(dividend)->from().IsInteger32()) { + return HChange::cast(dividend)->value(); + } + return NULL; +} + + +HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) { + // Only optimize when we have magic numbers for the divisor. + // The standard integer division routine is usually slower than transitionning + // to VFP. + if (divisor->IsConstant() && + HConstant::cast(divisor)->HasInteger32Value()) { + HConstant* constant_val = HConstant::cast(divisor); + int32_t int32_val = constant_val->Integer32Value(); + if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) { + return constant_val->CopyToRepresentation(Representation::Integer32(), + divisor->block()->zone()); + } + } + return NULL; +} + + +LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { + HValue* right = instr->right(); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegisterOrConstant(right); + LOperand* remainder = TempRegister(); + ASSERT(right->IsConstant() && + HConstant::cast(right)->HasInteger32Value() && + HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())); + return AssignEnvironment(DefineAsRegister( + new(zone()) LMathFloorOfDiv(dividend, divisor, remainder))); +} + + LInstruction* LChunkBuilder::DoMod(HMod* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1612,7 +1660,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { LOperand* object = UseFixed(instr->value(), r0); - LDateField* result = new LDateField(object, FixedTemp(r1), instr->index()); + LDateField* result = + new(zone()) LDateField(object, FixedTemp(r1), instr->index()); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1661,10 +1710,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegisterAtStart(instr->value()); - bool needs_check = !instr->value()->type().IsSmi(); LInstruction* res = NULL; - if (!needs_check) { - res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check)); + if (instr->value()->type().IsSmi()) { + res = DefineAsRegister(new(zone()) LSmiUntag(value, false)); } else { LOperand* temp1 = TempRegister(); LOperand* temp2 = instr->CanTruncateToInt32() ? 
TempRegister() @@ -1753,9 +1801,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { } -LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) { +LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckMap(value); + LInstruction* result = new(zone()) LCheckMaps(value); return AssignEnvironment(result); } @@ -2037,8 +2085,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = @@ -2059,16 +2108,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); - - LOperand* obj = needs_write_barrier - ? UseTempRegister(instr->object()) - : UseRegisterAtStart(instr->object()); + bool needs_write_barrier_for_map = !instr->transition().is_null() && + instr->NeedsWriteBarrierForMap(); + + LOperand* obj; + if (needs_write_barrier) { + obj = instr->is_in_object() + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else { + obj = needs_write_barrier_for_map + ? UseRegister(instr->object()) + : UseRegisterAtStart(instr->object()); + } LOperand* val = needs_write_barrier ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - return new(zone()) LStoreNamedField(obj, val); + // We need a temporary register for write barrier of the map field. + LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; + + return new(zone()) LStoreNamedField(obj, val, temp); } @@ -2111,7 +2172,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { - LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister()); + LAllocateObject* result = + new(zone()) LAllocateObject(TempRegister(), TempRegister()); return AssignPointerMap(DefineAsRegister(result)); } @@ -2242,9 +2304,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { if (pending_deoptimization_ast_id_ == instr->ast_id()) { LInstruction* result = new(zone()) LLazyBailout; result = AssignEnvironment(result); + // Store the lazy deopt environment with the instruction if needed. Right + // now it is only used for LInstanceOfKnownGlobal. 
instruction_pending_deoptimization_environment_-> - set_deoptimization_environment(result->environment()); - ClearInstructionPendingDeoptimizationEnvironment(); + SetDeferredLazyDeoptimizationEnvironment(result->environment()); + instruction_pending_deoptimization_environment_ = NULL; + pending_deoptimization_ast_id_ = AstNode::kNoNumber; return result; } @@ -2271,8 +2336,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { undefined, instr->call_kind(), instr->is_construct()); - if (instr->arguments() != NULL) { - inner->Bind(instr->arguments(), graph()->GetArgumentsObject()); + if (instr->arguments_var() != NULL) { + inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } current_block_->UpdateEnvironment(inner); chunk_->AddInlinedClosure(instr->closure()); @@ -2281,10 +2346,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { + LInstruction* pop = NULL; + + HEnvironment* env = current_block_->last_environment(); + + if (instr->arguments_pushed()) { + int argument_count = env->arguments_environment()->parameter_count(); + pop = new(zone()) LDrop(argument_count); + argument_count_ -= argument_count; + } + HEnvironment* outer = current_block_->last_environment()-> DiscardInlined(false); current_block_->UpdateEnvironment(outer); - return NULL; + + return pop; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 62cde6e249..869a80a280 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -72,7 +72,7 @@ class LCodeGen; V(CheckFunction) \ V(CheckInstanceType) \ V(CheckNonSmi) \ - V(CheckMap) \ + V(CheckMaps) \ V(CheckPrototypeMaps) \ V(CheckSmi) \ V(ClampDToUint8) \ @@ -132,6 +132,7 @@ class LCodeGen; V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ + V(MathFloorOfDiv) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -179,7 +180,8 @@ class LCodeGen; V(CheckMapValue) \ V(LoadFieldByIndex) \ V(DateField) \ - V(WrapReceiver) + V(WrapReceiver) \ + V(Drop) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -203,15 +205,14 @@ class LInstruction: public ZoneObject { LInstruction() : environment_(NULL), hydrogen_value_(NULL), - is_call_(false), - is_save_doubles_(false) { } + is_call_(false) { } virtual ~LInstruction() { } virtual void CompileToNative(LCodeGen* generator) = 0; virtual const char* Mnemonic() const = 0; virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream) = 0; - virtual void PrintOutputOperandTo(StringStream* stream) = 0; + virtual void PrintDataTo(StringStream* stream); + virtual void PrintOutputOperandTo(StringStream* stream); enum Opcode { // Declare a unique enum value for each instruction. @@ -246,22 +247,12 @@ class LInstruction: public ZoneObject { void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } - void set_deoptimization_environment(LEnvironment* env) { - deoptimization_environment_.set(env); - } - LEnvironment* deoptimization_environment() const { - return deoptimization_environment_.get(); - } - bool HasDeoptimizationEnvironment() const { - return deoptimization_environment_.is_set(); - } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } void MarkAsCall() { is_call_ = true; } - void MarkAsSaveDoubles() { is_save_doubles_ = true; } // Interface to the register allocator and iterators. 
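Note on the pervasive new(zone()) rewrites in these lithium hunks (e.g. GetOrCreateParallelMove above): this is placement new into an arena, so lithium objects are reclaimed wholesale when the zone dies instead of being deleted individually. A much-simplified stand-in, not V8's actual Zone (which bump-allocates in large segments and handles alignment):

    #include <cstddef>
    #include <vector>

    class Zone {  // toy arena: everything is freed at once in the destructor
     public:
      ~Zone() { for (char* p : chunks_) delete[] p; }
      void* New(size_t size) {  // ignores alignment; fine for a sketch
        chunks_.push_back(new char[size]);
        return chunks_.back();
      }
     private:
      std::vector<char*> chunks_;
    };

    void* operator new(size_t size, Zone* zone) { return zone->New(size); }

    struct LParallelMove {  // stand-in for a lithium object
      explicit LParallelMove(Zone* zone) : zone_(zone) {}
      Zone* zone_;
    };

    int main() {
      Zone zone;
      LParallelMove* move = new(&zone) LParallelMove(&zone);
      (void)move;  // no delete: the zone owns it
      return 0;
    }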
bool IsMarkedAsCall() const { return is_call_; } - bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; } virtual bool HasResult() const = 0; virtual LOperand* result() = 0; @@ -282,9 +273,7 @@ class LInstruction: public ZoneObject { LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; HValue* hydrogen_value_; - SetOncePointer<LEnvironment> deoptimization_environment_; bool is_call_; - bool is_save_doubles_; }; @@ -306,9 +295,6 @@ class LTemplateInstruction: public LInstruction { int TempCount() { return T; } LOperand* TempAt(int i) { return temps_[i]; } - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - protected: EmbeddedContainer<LOperand*, R> results_; EmbeddedContainer<LOperand*, I> inputs_; @@ -347,8 +333,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> { LAST_INNER_POSITION = AFTER }; - LParallelMove* GetOrCreateParallelMove(InnerPosition pos) { - if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove; + LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { + if (parallel_moves_[pos] == NULL) { + parallel_moves_[pos] = new(zone) LParallelMove(zone); + } return parallel_moves_[pos]; } @@ -534,9 +522,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { public: - LArgumentsElements() { } - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") + DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) }; @@ -582,6 +569,21 @@ class LDivI: public LTemplateInstruction<1, 2, 0> { }; +class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> { + public: + LMathFloorOfDiv(LOperand* left, + LOperand* right, + LOperand* temp = NULL) { + inputs_[0] = left; + inputs_[1] = right; + temps_[0] = temp; + } + + DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI: public LTemplateInstruction<1, 2, 1> { public: LMulI(LOperand* left, LOperand* right, LOperand* temp) { @@ -834,6 +836,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) Handle<JSFunction> function() const { return hydrogen()->function(); } + LEnvironment* GetDeferredLazyDeoptimizationEnvironment() { + return lazy_deopt_env_; + } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { + lazy_deopt_env_ = env; + } + + private: + LEnvironment* lazy_deopt_env_; }; @@ -1227,6 +1238,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1243,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1263,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() 
const { return hydrogen()->index_offset(); } }; @@ -1378,6 +1391,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { }; +class LDrop: public LTemplateInstruction<0, 0, 0> { + public: + explicit LDrop(int count) : count_(count) { } + + int count() const { return count_; } + + DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") + + private: + int count_; +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1460,6 +1486,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } + Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1659,11 +1686,12 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamedField(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) { inputs_[0] = obj; inputs_[1] = val; + temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") @@ -1717,6 +1745,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1739,6 +1768,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } + + bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -1781,6 +1813,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1889,14 +1922,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> { }; -class LCheckMap: public LTemplateInstruction<0, 1, 0> { +class LCheckMaps: public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMap(LOperand* value) { + explicit LCheckMaps(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map") - DECLARE_HYDROGEN_ACCESSOR(CheckMap) + DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") + DECLARE_HYDROGEN_ACCESSOR(CheckMaps) }; @@ -2236,9 +2269,11 @@ class LChunk: public ZoneObject { } void AddInlinedClosure(Handle<JSFunction> closure) { - inlined_closures_.Add(closure); + inlined_closures_.Add(closure, zone()); } + Zone* zone() const { return graph_->zone(); } + private: int spill_slot_count_; CompilationInfo* info_; @@ -2255,7 +2290,7 @@ class LChunkBuilder BASE_EMBEDDED { : chunk_(NULL), info_(info), graph_(graph), - zone_(graph->isolate()->zone()), + zone_(graph->zone()), status_(UNUSED), current_instruction_(NULL), current_block_(NULL), @@ -2274,6 +2309,10 @@ class LChunkBuilder BASE_EMBEDDED { HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + static bool HasMagicNumberForDivisor(int32_t divisor); + static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val); + static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val); + private: enum Status { UNUSED, @@ -2369,11 
+2408,6 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - LInstruction* MarkAsSaveDoubles(LInstruction* instr); - - LInstruction* SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id); - void ClearInstructionPendingDeoptimizationEnvironment(); LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, int* argument_index_accumulator); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 82b80a2b80..256d180f2f 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, LInstruction* instr, SafepointMode safepoint_mode) { ASSERT(instr != NULL); + // Block literal pool emission to ensure nop indicating no inlined smi code + // is in the correct position. + Assembler::BlockConstPoolScope block_const_pool(masm()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); __ Call(code, mode); @@ -631,14 +634,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, ++jsframe_count; } } - Translation translation(&translations_, frame_count, jsframe_count); + Translation translation(&translations_, frame_count, jsframe_count, + zone()); WriteTranslation(environment, &translation); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); environment->Register(deoptimization_index, translation.index(), (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); - deoptimizations_.Add(environment); + deoptimizations_.Add(environment, zone()); } } @@ -670,7 +674,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { // jump entry if this is the case. if (deopt_jump_table_.is_empty() || (deopt_jump_table_.last().address != entry)) { - deopt_jump_table_.Add(JumpTableEntry(entry)); + deopt_jump_table_.Add(JumpTableEntry(entry), zone()); } __ b(cc, &deopt_jump_table_.last().label); } @@ -715,7 +719,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { for (int i = 0; i < deoptimization_literals_.length(); ++i) { if (deoptimization_literals_[i].is_identical_to(literal)) return i; } - deoptimization_literals_.Add(literal); + deoptimization_literals_.Add(literal, zone()); return result; } @@ -761,14 +765,14 @@ void LCodeGen::RecordSafepoint( for (int i = 0; i < operands->length(); i++) { LOperand* pointer = operands->at(i); if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index()); + safepoint.DefinePointerSlot(pointer->index(), zone()); } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer)); + safepoint.DefinePointerRegister(ToRegister(pointer), zone()); } } if (kind & Safepoint::kWithRegisters) { // Register cp always contains a pointer to the context. 
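A note on the zone() plumbing threaded through the hunks above and below: every growable list and deferred object now takes an explicit Zone, the arena that is freed wholesale when compilation finishes, instead of allocating on the plain heap. The sketch below models the pattern under simplifying assumptions; the real v8::internal::Zone bump-allocates inside chained segments rather than taking one heap block per request, and the class names here are illustrative only.

    #include <cstddef>
    #include <vector>

    // Simplified arena: one heap block per request, all released together.
    // Assumes the objects need no destructor calls, which matches how
    // Lithium instructions and lists are used.
    class Zone {
     public:
      ~Zone() { for (char* block : blocks_) delete[] block; }
      void* New(size_t size) {
        blocks_.push_back(new char[size]);
        return blocks_.back();
      }
     private:
      std::vector<char*> blocks_;
    };

    // Gives subclasses the 'new(zone) T(...)' spelling used in the diff;
    // ZoneObject plays this role in V8.
    struct ZoneAllocated {
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      void operator delete(void*) {}         // memory dies with the zone
      void operator delete(void*, Zone*) {}  // matching placement delete
    };

    struct ParallelMove : ZoneAllocated {
      explicit ParallelMove(Zone*) {}  // the real class also stores the zone
    };

    int main() {
      Zone zone;
      ParallelMove* move = new (&zone) ParallelMove(&zone);
      (void)move;  // no delete: the whole arena dies with 'zone'
    }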
- safepoint.DefinePointerRegister(cp); + safepoint.DefinePointerRegister(cp, zone()); } } @@ -780,7 +784,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition); + LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); RecordSafepoint(&empty_pointers, deopt_mode); } @@ -1034,6 +1038,100 @@ void LCodeGen::DoModI(LModI* instr) { } +void LCodeGen::EmitSignedIntegerDivisionByConstant( + Register result, + Register dividend, + int32_t divisor, + Register remainder, + Register scratch, + LEnvironment* environment) { + ASSERT(!AreAliased(dividend, scratch, ip)); + ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); + + uint32_t divisor_abs = abs(divisor); + + int32_t power_of_2_factor = + CompilerIntrinsics::CountTrailingZeros(divisor_abs); + + switch (divisor_abs) { + case 0: + DeoptimizeIf(al, environment); + return; + + case 1: + if (divisor > 0) { + __ Move(result, dividend); + } else { + __ rsb(result, dividend, Operand(0), SetCC); + DeoptimizeIf(vs, environment); + } + // Compute the remainder. + __ mov(remainder, Operand(0)); + return; + + default: + if (IsPowerOf2(divisor_abs)) { + // Branch and condition free code for integer division by a power + // of two. + int32_t power = WhichPowerOf2(divisor_abs); + if (power > 1) { + __ mov(scratch, Operand(dividend, ASR, power - 1)); + } + __ add(scratch, dividend, Operand(scratch, LSR, 32 - power)); + __ mov(result, Operand(scratch, ASR, power)); + // Negate if necessary. + // We don't need to check for overflow because the case '-1' is + // handled separately. + if (divisor < 0) { + ASSERT(divisor != -1); + __ rsb(result, result, Operand(0)); + } + // Compute the remainder. + if (divisor > 0) { + __ sub(remainder, dividend, Operand(result, LSL, power)); + } else { + __ add(remainder, dividend, Operand(result, LSL, power)); + } + return; + } else { + // Use magic numbers for a few specific divisors. + // Details and proofs can be found in: + // - Hacker's Delight, Henry S. Warren, Jr. + // - The PowerPC Compiler Writer’s Guide + // and probably many others. + // + // We handle + // <divisor with magic numbers> * <power of 2> + // but not + // <divisor with magic numbers> * <other divisor with magic numbers> + DivMagicNumbers magic_numbers = + DivMagicNumberFor(divisor_abs >> power_of_2_factor); + // Compute the quotient using the magic multiplier and post-shift. + const int32_t M = magic_numbers.M; + const int32_t s = magic_numbers.s + power_of_2_factor; + + __ mov(ip, Operand(M)); + __ smull(ip, scratch, dividend, ip); + if (M < 0) { + __ add(scratch, scratch, Operand(dividend)); + } + if (s > 0) { + __ mov(scratch, Operand(scratch, ASR, s)); + } + __ add(result, scratch, Operand(dividend, LSR, 31)); + if (divisor < 0) __ rsb(result, result, Operand(0)); + // Compute the remainder. + __ mov(ip, Operand(divisor)); + // This sequence could be replaced with 'mls' when + // it gets implemented. + __ mul(scratch, result, ip); + __ sub(remainder, dividend, scratch); + } + } +} + + void LCodeGen::DoDivI(LDivI* instr) { class DeferredDivI: public LDeferredCode { public: @@ -1096,7 +1194,7 @@ void LCodeGen::DoDivI(LDivI* instr) { // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize.
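The EmitSignedIntegerDivisionByConstant helper above is the multiply-by-magic-number construction from Hacker's Delight: smull keeps the high 32 bits of M * dividend, a fixup add is needed when M is negative, an arithmetic shift by s follows, and adding the dividend's sign bit rounds the quotient toward zero. A standalone C++ model of that arithmetic is sketched below; the (M, s) pair for divisor 7 is filled in by hand, since DivMagicNumberFor() is outside this diff, and arithmetic right shift on signed values is assumed, as on ARM.

    #include <cstdint>
    #include <cstdio>

    // Mirrors the emitted smull/add/asr sequence. (M, s) below is the magic
    // pair for divisor 7; every divisor needs its own pair.
    int32_t DivideBy7(int32_t n) {
      const int32_t M = static_cast<int32_t>(0x92492493u);  // magic multiplier
      const int32_t s = 2;                                  // post-shift
      // smull: keep the high 32 bits of the signed 64-bit product.
      int32_t hi = static_cast<int32_t>((static_cast<int64_t>(M) * n) >> 32);
      if (M < 0) hi += n;                    // 'add scratch, scratch, dividend'
      hi >>= s;                              // 'mov scratch, scratch, ASR s'
      hi += static_cast<uint32_t>(n) >> 31;  // 'add result, ..., LSR 31'
      return hi;                             // truncated n / 7
    }

    int main() {
      for (int32_t n : {-22, -7, -1, 0, 6, 7, 22}) {
        printf("%d / 7 = %d\n", n, DivideBy7(n));
      }
    }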
- DeferredDivI* deferred = new DeferredDivI(this, instr); + DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); __ TrySmiTag(right, &deoptimize, scratch); @@ -1115,6 +1213,34 @@ void LCodeGen::DoDivI(LDivI* instr) { } +void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { + const Register result = ToRegister(instr->result()); + const Register left = ToRegister(instr->InputAt(0)); + const Register remainder = ToRegister(instr->TempAt(0)); + const Register scratch = scratch0(); + + // We only optimize this for division by constants, because the standard + // integer division routine is usually slower than transitioning to VFP. + // This could be optimized on processors with SDIV available. + ASSERT(instr->InputAt(1)->IsConstantOperand()); + int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1))); + if (divisor < 0) { + __ cmp(left, Operand(0)); + DeoptimizeIf(eq, instr->environment()); + } + EmitSignedIntegerDivisionByConstant(result, + left, + divisor, + remainder, + scratch, + instr->environment()); + // We performed a truncating division. Correct the result if necessary. + __ cmp(remainder, Operand(0)); + __ teq(remainder, Operand(divisor), ne); + __ sub(result, result, Operand(1), LeaveCC, mi); +} + + template<int T> void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, Token::Value op) { @@ -1562,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->result()).is(r0)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); + // Block literal pool emission to ensure nop indicating no inlined smi code + // is in the correct position. + Assembler::BlockConstPoolScope block_const_pool(masm()); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. } @@ -2174,7 +2303,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { }; DeferredInstanceOfKnownGlobal* deferred; - deferred = new DeferredInstanceOfKnownGlobal(this, instr); + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); Label done, false_result; Register object = ToRegister(instr->InputAt(0)); @@ -2193,20 +2322,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Label cache_miss; Register map = temp; __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); - __ bind(deferred->map_check()); // Label for calculating code patching. - // We use Factory::the_hole_value() on purpose instead of loading from the - // root array to force relocation to be able to later patch with - // the cached map. - Handle<JSGlobalPropertyCell> cell = - factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); - __ mov(ip, Operand(Handle<Object>(cell))); - __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - __ cmp(map, Operand(ip)); - __ b(ne, &cache_miss); - // We use Factory::the_hole_value() on purpose instead of loading from the - // root array to force relocation to be able to later patch - // with true or false. - __ mov(result, Operand(factory()->the_hole_value())); + { + // Block constant pool emission to ensure the positions of instructions are + // as expected by the patcher. See InstanceofStub::Generate(). + Assembler::BlockConstPoolScope block_const_pool(masm()); + __ bind(deferred->map_check()); // Label for calculating code patching.
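DoMathFloorOfDiv above turns the truncating quotient into a flooring one: subtract 1 exactly when the remainder is nonzero and its sign differs from the divisor's. The cmp/teq/sub(mi) triple evaluates that predicate entirely in the flags, with teq setting N from the sign of remainder ^ divisor. Spelled out in C++ as a sketch; the INT_MIN / -1 overflow case is assumed to be excluded up front, as in the deopt checks above:

    #include <cassert>
    #include <cstdint>

    // Floor division built from a truncating quotient, mirroring the
    // cmp/teq/sub(mi) fixup emitted above.
    int32_t FloorDiv(int32_t n, int32_t d) {
      assert(d != 0);
      int32_t q = n / d;                 // truncating division
      int32_t r = n - q * d;             // remainder, same sign as n
      if (r != 0 && ((r ^ d) < 0)) --q;  // signs differ: round down once more
      return q;                          // FloorDiv(-7, 2) == -4, not -3
    }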
+ // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch with + // the cached map. + Handle<JSGlobalPropertyCell> cell = + factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); + __ mov(ip, Operand(Handle<Object>(cell))); + __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); + __ cmp(map, Operand(ip)); + __ b(ne, &cache_miss); + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch + // with true or false. + __ mov(result, Operand(factory()->the_hole_value())); + } __ b(&done); // The inlined call site cache did not match. Check null and string before @@ -2267,8 +2401,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - ASSERT(instr->HasDeoptimizationEnvironment()); - LEnvironment* env = instr->deoptimization_environment(); + LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); // Put the result value into the result register slot and // restore all registers. @@ -2438,12 +2571,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, Register object, Handle<Map> type, - Handle<String> name) { + Handle<String> name, + LEnvironment* env) { LookupResult lookup(isolate()); type->LookupInDescriptors(NULL, *name, &lookup); - ASSERT(lookup.IsFound() && - (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); - if (lookup.type() == FIELD) { + ASSERT(lookup.IsFound() || lookup.IsCacheable()); + if (lookup.IsFound() && lookup.type() == FIELD) { int index = lookup.GetLocalFieldIndexFromMap(*type); int offset = index * kPointerSize; if (index < 0) { @@ -2455,9 +2588,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); } - } else { + } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) { Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); __ LoadHeapObject(result, function); + } else { + // Negative lookup. + // Check prototypes. 
+ HeapObject* current = HeapObject::cast((*type)->prototype()); + Heap* heap = type->GetHeap(); + while (current != heap->null_value()) { + Handle<HeapObject> link(current); + __ LoadHeapObject(result, link); + __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset)); + __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map()))); + DeoptimizeIf(ne, env); + current = HeapObject::cast(current->map()->prototype()); + } + __ LoadRoot(result, Heap::kUndefinedValueRootIndex); } } @@ -2465,43 +2612,45 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { Register object = ToRegister(instr->object()); Register result = ToRegister(instr->result()); - Register scratch = scratch0(); + Register object_map = scratch0(); + int map_count = instr->hydrogen()->types()->length(); + bool need_generic = instr->hydrogen()->need_generic(); + + if (map_count == 0 && !need_generic) { + DeoptimizeIf(al, instr->environment()); + return; + } Handle<String> name = instr->hydrogen()->name(); - if (map_count == 0) { - ASSERT(instr->hydrogen()->need_generic()); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - Label done; - __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - for (int i = 0; i < map_count - 1; ++i) { - Handle<Map> map = instr->hydrogen()->types()->at(i); + Label done; + __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); + for (int i = 0; i < map_count; ++i) { + bool last = (i == map_count - 1); + Handle<Map> map = instr->hydrogen()->types()->at(i); + Label check_passed; + __ CompareMap( + object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS); + if (last && !need_generic) { + DeoptimizeIf(ne, instr->environment()); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); + } else { Label next; - __ cmp(scratch, Operand(map)); __ b(ne, &next); - EmitLoadFieldOrConstantFunction(result, object, map, name); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); __ b(&done); __ bind(&next); } - Handle<Map> map = instr->hydrogen()->types()->last(); - __ cmp(scratch, Operand(map)); - if (instr->hydrogen()->need_generic()) { - Label generic; - __ b(ne, &generic); - EmitLoadFieldOrConstantFunction(result, object, map, name); - __ b(&done); - __ bind(&generic); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - DeoptimizeIf(ne, instr->environment()); - EmitLoadFieldOrConstantFunction(result, object, map, name); - } - __ bind(&done); } + if (need_generic) { + __ mov(r2, Operand(name)); + Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + } + __ bind(&done); } @@ -2579,8 +2728,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); __ ubfx(scratch, scratch, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ cmp(scratch, Operand(FAST_ELEMENTS)); - __ b(eq, &done); + __ cmp(scratch, Operand(GetInitialFastElementsKind())); + __ b(lt, &fail); + __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND)); + __ b(le, &done); __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); __ b(lt, &fail); __ cmp(scratch, 
Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); @@ -2627,13 +2778,20 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { // Load the result. __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + uint32_t offset = FixedArray::kHeaderSize + + (instr->additional_index() << kPointerSizeLog2); + __ ldr(result, FieldMemOperand(scratch, offset)); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr->environment()); + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ tst(result, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } else { + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + __ cmp(result, scratch); + DeoptimizeIf(eq, instr->environment()); + } } } @@ -2659,18 +2817,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( } Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand(((constant_key + instr->additional_index()) << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(elements, elements, operand); if (!key_is_constant) { __ add(elements, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + (instr->additional_index() << shift_size))); } - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } __ vldr(result, elements, 0); } @@ -2692,26 +2853,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister result = ToDoubleRegister(instr->result()); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size)) + ? Operand(constant_key << shift_size) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), 0); + __ vldr(result.low(), scratch0(), additional_offset); __ vcvt_f64_f32(result, result.low()); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), 0); + __ vldr(result, scratch0(), additional_offset); } } else { Register result = ToRegister(instr->result()); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? MemOperand(external_pointer, + (constant_key << shift_size) + additional_offset) + : (instr->additional_index() == 0 + ? 
MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: __ ldrsb(result, mem_operand); @@ -2739,9 +2907,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2764,16 +2935,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); - __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + if (instr->hydrogen()->from_inlined()) { + __ sub(result, sp, Operand(2 * kPointerSize)); + } else { + // Check if the calling frame is an arguments adaptor frame. + Label done, adapted; + __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); + __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ mov(result, fp, LeaveCC, ne); - __ mov(result, scratch, LeaveCC, eq); + // Result is the frame pointer for the frame if not adapted and for the real + // frame below the adaptor frame if adapted. + __ mov(result, fp, LeaveCC, ne); + __ mov(result, scratch, LeaveCC, eq); + } } @@ -2882,7 +3057,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ b(ne, &loop); __ bind(&invoke); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( @@ -2907,6 +3082,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) { } +void LCodeGen::DoDrop(LDrop* instr) { + __ Drop(instr->count()); +} + + void LCodeGen::DoThisFunction(LThisFunction* instr) { Register result = ToRegister(instr->result()); __ LoadHeapObject(result, instr->hydrogen()->closure()); @@ -2953,7 +3133,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, int arity, LInstruction* instr, - CallKind call_kind) { + CallKind call_kind, + R1State r1_state) { bool can_invoke_directly = !function->NeedsArgumentsAdaption() || function->shared()->formal_parameter_count() == arity; @@ -2961,7 +3142,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordPosition(pointers->position()); if (can_invoke_directly) { - __ LoadHeapObject(r1, function); + if (r1_state == R1_UNINITIALIZED) { + __ LoadHeapObject(r1, function); + } + // Change context if needed. 
bool change_context = (info()->closure()->context() != function->context()) || @@ -3000,7 +3184,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { CallKnownFunction(instr->function(), instr->arity(), instr, - CALL_AS_METHOD); + CALL_AS_METHOD, + R1_UNINITIALIZED); } @@ -3109,7 +3294,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } else { // Representation is tagged. DeferredMathAbsTaggedHeapNumber* deferred = - new DeferredMathAbsTaggedHeapNumber(this, instr); + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); Register input = ToRegister(instr->InputAt(0)); // Smi check. __ JumpIfNotSmi(input, deferred->entry()); @@ -3286,7 +3471,7 @@ void LCodeGen::DoRandom(LRandom* instr) { LRandom* instr_; }; - DeferredDoRandom* deferred = new DeferredDoRandom(this, instr); + DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); // Having marked this instruction as a call we can use any // registers. @@ -3424,13 +3609,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(instr->HasPointerMap()); - ASSERT(instr->HasDeoptimizationEnvironment()); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount count(instr->arity()); - __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + if (instr->known_function().is_null()) { + LPointerMap* pointers = instr->pointer_map(); + RecordPosition(pointers->position()); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(instr->arity()); + __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } else { + CallKnownFunction(instr->known_function(), + instr->arity(), + instr, + CALL_AS_METHOD, + R1_CONTAINS_TARGET); + } } @@ -3485,7 +3678,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(r0)); - CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); + CallKnownFunction(instr->target(), + instr->arity(), + instr, + CALL_AS_FUNCTION, + R1_UNINITIALIZED); } @@ -3515,6 +3712,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (!instr->transition().is_null()) { __ mov(scratch, Operand(instr->transition())); __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { + Register temp = ToRegister(instr->TempAt(0)); + // Update the write barrier for the map field. + __ RecordWriteField(object, + HeapObject::kMapOffset, + scratch, + temp, + kLRHasBeenSaved, + kSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + } } // Do the store. 
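The keyed-element hunks below, like the load-side hunks earlier, fold hydrogen's index_offset() into the address arithmetic rather than emitting a separate add: with a constant key the whole offset becomes part of the displacement, while with a register key a nonzero additional_index is first added into scratch0(). As plain integer arithmetic, the two addressing shapes reduce to the sketch below (names are illustrative):

    #include <cstdint>

    // Constant key: (constant_key + additional_index) << shift_size folds
    // straight into the MemOperand displacement.
    uintptr_t ConstantKeyAddress(uintptr_t base, int32_t constant_key,
                                 int32_t additional_index, int shift_size) {
      return base +
             (static_cast<uintptr_t>(constant_key + additional_index)
              << shift_size);
    }

    // Register key: the offset is biased into a scratch value first, then
    // shifted by the addressing mode (key, LSL, shift_size).
    uintptr_t RegisterKeyAddress(uintptr_t base, int32_t key,
                                 int32_t additional_index, int shift_size) {
      int32_t biased_key = key + additional_index;  // scratch0() in the diff
      return base + (static_cast<uintptr_t>(biased_key) << shift_size);
    }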
@@ -3583,10 +3792,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; + (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize + + FixedArray::kHeaderSize; __ str(value, FieldMemOperand(elements, offset)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + if (instr->additional_index() != 0) { + __ add(scratch, + scratch, + Operand(instr->additional_index() << kPointerSizeLog2)); + } __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); } @@ -3615,7 +3830,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( Register scratch = scratch0(); bool key_is_constant = instr->key()->IsConstantOperand(); int constant_key = 0; - Label not_nan; // Calculate the effective address of the slot in the array to store the // double value. @@ -3629,7 +3843,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand((constant_key << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(scratch, elements, operand); @@ -3638,14 +3852,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - - // Only load canonical NaN if the comparison above set the overflow. - __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs); + if (instr->NeedsCanonicalization()) { + // Check for NaN. All NaNs must be canonicalized. + __ VFPCompareAndSetFlags(value, value); + // Only load canonical NaN if the comparison above set the overflow. + __ Vmov(value, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + vs); + } - __ bind(¬_nan); - __ vstr(value, scratch, 0); + __ vstr(value, scratch, instr->additional_index() << shift_size); } @@ -3666,25 +3882,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); - Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size)) + Operand operand(key_is_constant ? Operand(constant_key << shift_size) : Operand(key, LSL, shift_size)); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), scratch0(), 0); + __ vstr(double_scratch0().low(), scratch0(), additional_offset); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vstr(value, scratch0(), 0); + __ vstr(value, scratch0(), additional_offset); } } else { Register value(ToRegister(instr->value())); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? 
MemOperand(external_pointer, + ((constant_key + instr->additional_index()) + << shift_size)) + : (instr->additional_index() == 0 + ? MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: @@ -3703,7 +3927,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3740,20 +3967,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ cmp(scratch, Operand(from_map)); __ b(ne, &not_applicable); __ mov(new_map_reg, Operand(to_map)); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kLRHasBeenSaved, kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); @@ -3787,7 +4016,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { }; DeferredStringCharCodeAt* deferred = - new DeferredStringCharCodeAt(this, instr); + new(zone()) DeferredStringCharCodeAt(this, instr); StringCharLoadGenerator::Generate(masm(), ToRegister(instr->string()), @@ -3842,7 +4071,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { }; DeferredStringCharFromCode* deferred = - new DeferredStringCharFromCode(this, instr); + new(zone()) DeferredStringCharFromCode(this, instr); ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); @@ -3916,7 +4145,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { Register src = ToRegister(instr->InputAt(0)); Register dst = ToRegister(instr->result()); - DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); + DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); __ SmiTag(dst, src, SetCC); __ b(vs, deferred->entry()); __ bind(deferred->exit()); @@ -3987,7 +4216,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Register temp1 = ToRegister(instr->TempAt(0)); Register temp2 = ToRegister(instr->TempAt(1)); - DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); + DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); @@ -4189,7 +4418,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { Register
input_reg = ToRegister(input); - DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); + DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); // Optimistically untag the input. // If the input is a HeapObject, SmiUntag will set the carry flag. @@ -4338,14 +4567,22 @@ void LCodeGen::DoCheckMapCommon(Register reg, } -void LCodeGen::DoCheckMap(LCheckMap* instr) { +void LCodeGen::DoCheckMaps(LCheckMaps* instr) { Register scratch = scratch0(); LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); Register reg = ToRegister(input); - Handle<Map> map = instr->hydrogen()->map(); - DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(), - instr->environment()); + + Label success; + SmallMapList* map_set = instr->hydrogen()->map_set(); + for (int i = 0; i < map_set->length() - 1; i++) { + Handle<Map> map = map_set->at(i); + __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); + __ b(eq, &success); + } + Handle<Map> map = map_set->last(); + DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); + __ bind(&success); } @@ -4441,7 +4678,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { LAllocateObject* instr_; }; - DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr); + DeferredAllocateObject* deferred = + new(zone()) DeferredAllocateObject(this, instr); Register result = ToRegister(instr->result()); Register scratch = ToRegister(instr->TempAt(0)); @@ -4464,6 +4702,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { deferred->entry(), TAG_OBJECT); + __ bind(deferred->exit()); + if (FLAG_debug_code) { + Label is_in_new_space; + __ JumpIfInNewSpace(result, scratch, &is_in_new_space); + __ Abort("Allocated object is not in new-space"); + __ bind(&is_in_new_space); + } + // Load the initial map. Register map = scratch; __ LoadHeapObject(map, constructor); @@ -4482,14 +4728,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { __ str(scratch, FieldMemOperand(result, property_offset)); } } - - __ bind(deferred->exit()); } void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); + Handle<Map> initial_map(constructor->initial_map()); + int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already @@ -4497,9 +4743,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { __ mov(result, Operand(0)); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - __ LoadHeapObject(r0, constructor); + __ mov(r0, Operand(Smi::FromInt(instance_size))); __ push(r0); - CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); __ StoreToSafepointRegisterSlot(r0, result); } @@ -4511,8 +4757,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object()); // Load map into r2. 
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -4633,9 +4880,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ str(r2, FieldMemOperand(result, total_offset + 4)); } } else if (elements->IsFixedArray()) { + Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value = JSObject::GetElement(object, i); + Handle<Object> value(fast_elements->get(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ add(r2, result, Operand(*offset)); @@ -4659,6 +4907,24 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, void LCodeGen::DoFastLiteral(LFastLiteral* instr) { int size = instr->hydrogen()->total_size(); + ElementsKind boilerplate_elements_kind = + instr->hydrogen()->boilerplate()->GetElementsKind(); + + // Deopt if the array literal boilerplate ElementsKind is of a type different + // than the expected one. The check isn't necessary if the boilerplate has + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { + __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); + // Load map into r2. + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + // Load the map's "bit field 2". + __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset)); + // Retrieve elements_kind from bit field 2. + __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount); + __ cmp(r2, Operand(boilerplate_elements_kind)); + DeoptimizeIf(ne, instr->environment()); + } // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. @@ -4923,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() { int current_pc = masm()->pc_offset(); int patch_size = Deoptimizer::patch_size(); if (current_pc < last_lazy_deopt_pc_ + patch_size) { + // Block literal pool emission for duration of padding. + Assembler::BlockConstPoolScope block_const_pool(masm()); int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; ASSERT_EQ(0, padding_size % Assembler::kInstrSize); while (padding_size > 0) { @@ -4954,7 +5222,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register strict = scratch0(); __ mov(strict, Operand(Smi::FromInt(strict_mode_flag()))); __ Push(object, key, strict); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( @@ -4967,7 +5235,7 @@ void LCodeGen::DoIn(LIn* instr) { Register obj = ToRegister(instr->object()); Register key = ToRegister(instr->key()); __ Push(key, obj); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); @@ -5017,7 +5285,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { ASSERT(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping. 
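EnsureSpaceForLazyDeopt above, now wrapped in a BlockConstPoolScope so a literal pool cannot be dumped into the padding, emits nops until at least patch_size() bytes separate consecutive lazy-deopt sites; the deoptimizer can then patch a call over a site without clobbering its neighbor. The shape of that loop, over a hypothetical assembler interface reduced to the two calls involved:

    // Hypothetical assembler, just enough to show the padding loop.
    struct Assembler {
      static const int kInstrSize = 4;  // ARM instructions are 4 bytes
      int pc = 0;
      void nop() { pc += kInstrSize; }
      int pc_offset() const { return pc; }
    };

    // Guarantee 'patch_size' bytes between the previous lazy-deopt site
    // and the current position.
    void EnsureSpaceForLazyDeopt(Assembler* masm, int last_lazy_deopt_pc,
                                 int patch_size) {
      int padding = last_lazy_deopt_pc + patch_size - masm->pc_offset();
      while (padding > 0) {
        masm->nop();
        padding -= Assembler::kInstrSize;
      }
    }

    int main() {
      Assembler masm;
      EnsureSpaceForLazyDeopt(&masm, 0, 12);  // pads three instructions
      return masm.pc_offset() == 12 ? 0 : 1;
    }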
DeferredStackCheck* deferred_stack_check = - new DeferredStackCheck(this, instr); + new(zone()) DeferredStackCheck(this, instr); __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(lo, deferred_stack_check->entry()); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index adb6e1bb73..f35c69b8a3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -43,22 +43,26 @@ class SafepointGenerator; class LCodeGen BASE_EMBEDDED { public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) + LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info, + Zone* zone) : chunk_(chunk), masm_(assembler), info_(info), current_block_(-1), current_instruction_(-1), instructions_(chunk->instructions()), - deoptimizations_(4), - deopt_jump_table_(4), - deoptimization_literals_(8), + deoptimizations_(4, zone), + deopt_jump_table_(4, zone), + deoptimization_literals_(8, zone), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), - deferred_(8), + translations_(zone), + deferred_(8, zone), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + safepoints_(zone), + zone_(zone), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); @@ -71,6 +75,7 @@ class LCodeGen BASE_EMBEDDED { Isolate* isolate() const { return info_->isolate(); } Factory* factory() const { return isolate()->factory(); } Heap* heap() const { return isolate()->heap(); } + Zone* zone() const { return zone_; } // Support for converting LOperands to assembler types. // LOperand must be a register. @@ -176,7 +181,7 @@ class LCodeGen BASE_EMBEDDED { void Abort(const char* format, ...); void Comment(const char* format, ...); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); } + void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Code generation passes. Returns true if code generation should // continue. @@ -215,12 +220,18 @@ class LCodeGen BASE_EMBEDDED { int argc, LInstruction* instr); + enum R1State { + R1_UNINITIALIZED, + R1_CONTAINS_TARGET + }; + // Generate a direct call to a known function. Expects the function // to be in r1. void CallKnownFunction(Handle<JSFunction> function, int arity, LInstruction* instr, - CallKind call_kind); + CallKind call_kind, + R1State r1_state); void LoadHeapObject(Register result, Handle<HeapObject> object); @@ -308,7 +319,8 @@ class LCodeGen BASE_EMBEDDED { void EmitLoadFieldOrConstantFunction(Register result, Register object, Handle<Map> type, - Handle<String> name); + Handle<String> name, + LEnvironment* env); // Emits optimized code to deep-copy the contents of statically known // object graphs (e.g. object literal boilerplate). @@ -317,6 +329,17 @@ class LCodeGen BASE_EMBEDDED { Register source, int* offset); + // Emit optimized code for integer division. + // Inputs are signed. + // All registers are clobbered. + // If 'remainder' is no_reg, it is not computed. + void EmitSignedIntegerDivisionByConstant(Register result, + Register dividend, + int32_t divisor, + Register remainder, + Register scratch, + LEnvironment* environment); + struct JumpTableEntry { explicit inline JumpTableEntry(Address entry) : label(), @@ -349,6 +372,8 @@ class LCodeGen BASE_EMBEDDED { // itself is emitted at the end of the generated code. SafepointTableBuilder safepoints_; + Zone* zone_; + // Compiles a set of parallel moves into a sequential list of moves.
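That last comment describes the job of the LGapResolver touched in the next file: the moves in a gap are semantically simultaneous, so the resolver must order them and break cycles through a reserved temporary (kSavedValueRegister on ARM). A toy sequentializer over integer register numbers, assuming distinct destinations as a parallel move guarantees:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Move { int dst, src; };  // register numbers; -1 is the temporary

    static std::string Name(int r) {
      return r < 0 ? "temp" : "r" + std::to_string(r);
    }

    // Emit any move whose destination is no longer needed as a source; when
    // only cycles remain, park one source in the temporary to cut them.
    void Resolve(std::vector<Move> moves) {
      while (!moves.empty()) {
        bool progress = false;
        for (size_t i = 0; i < moves.size(); ++i) {
          bool blocked = false;
          for (size_t j = 0; j < moves.size(); ++j) {
            if (j != i && moves[j].src == moves[i].dst) blocked = true;
          }
          if (!blocked) {
            printf("mov %s <- %s\n", Name(moves[i].dst).c_str(),
                   Name(moves[i].src).c_str());
            moves.erase(moves.begin() + i);
            progress = true;
            break;
          }
        }
        if (!progress) {  // a cycle: spill one source, rewrite its readers
          int parked = moves[0].src;
          printf("mov temp <- %s\n", Name(parked).c_str());
          for (Move& m : moves) {
            if (m.src == parked) m.src = -1;
          }
        }
      }
    }

    int main() {
      Resolve({{0, 1}, {1, 0}});  // swap: temp <- r1; r1 <- r0; r0 <- temp
    }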
LGapResolver resolver_; diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index cefca476ad..c100720d89 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -36,7 +36,7 @@ namespace internal { static const Register kSavedValueRegister = { 9 }; LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), + : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), saved_destination_(NULL) { } @@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); for (int i = 0; i < moves->length(); ++i) { LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move); + if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); } Verify(); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 857c2bf770..7c49e9e58a 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } @@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(ls, fail); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(hi, fail); } @@ -1995,24 +2000,27 @@ void MacroAssembler::CompareMap(Register obj, Label* early_success, CompareMapMode mode) { ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - cmp(scratch, Operand(map)); - if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if 
(transitioned_fast_element_map != NULL) { - b(eq, early_success); - cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map))); - } + CompareMap(scratch, map, early_success, mode); +} + - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - b(eq, early_success); - cmp(scratch, Operand(Handle<Map>(transitioned_double_map))); +void MacroAssembler::CompareMap(Register obj_map, + Handle<Map> map, + Label* early_success, + CompareMapMode mode) { + cmp(obj_map, Operand(map)); + if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = current_map->LookupElementsTransitionMap(kind); + if (!current_map) break; + b(eq, early_success); + cmp(obj_map, Operand(Handle<Map>(current_map))); + } } } } @@ -2865,28 +2873,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. - int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index))); - cmp(map_in_out, ip); + ldr(scratch, + MemOperand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmp(map_in_out, scratch); b(ne, no_map_match); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + ldr(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; ldr(map_out, FieldMemOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? 
FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); @@ -3710,22 +3728,35 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; } +#endif CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), size_(instructions * Assembler::kInstrSize), - masm_(Isolate::Current(), address, size_ + Assembler::kGap) { + masm_(NULL, address, size_ + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap in order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 47afa93a6e..6b7d116357 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -85,7 +85,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; -bool AreAliased(Register r1, Register r2, Register r3, Register r4); +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg); +#endif // MacroAssembler implements a collection of frequently used macros. @@ -505,7 +512,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); void LoadGlobalFunction(int index, Register function); @@ -795,9 +803,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); + void CheckFastSmiElements(Register map, + Register scratch, + Label* fail); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in @@ -823,6 +831,13 @@ Label* early_success, CompareMapMode mode = REQUIRE_EXACT_MAP); + // As above, but the map of the object is already loaded into the register + // which is preserved by the code generated.
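The debug-only AreAliased rewrite above swaps pairwise is() comparisons for a counting argument: OR every valid register's bit into a RegList and compare the population count against the number of valid registers, since any alias collapses two bits into one. The same check in self-contained form, with a toy register type standing in for V8's:

    #include <bitset>
    #include <cstdint>

    // Toy register: code < 0 plays the role of no_reg / !is_valid().
    struct Reg {
      int code;
      bool is_valid() const { return code >= 0; }
      uint32_t bit() const { return is_valid() ? 1u << code : 0u; }
    };

    bool AreAliased(Reg r1, Reg r2, Reg r3 = {-1}, Reg r4 = {-1}) {
      int n_valid =
          r1.is_valid() + r2.is_valid() + r3.is_valid() + r4.is_valid();
      uint32_t regs = r1.bit() | r2.bit() | r3.bit() | r4.bit();
      int n_distinct = static_cast<int>(std::bitset<32>(regs).count());
      return n_valid != n_distinct;  // an alias merged two bits into one
    }

    int main() {
      Reg r0{0}, r1{1};
      return AreAliased(r0, r1, r0) ? 0 : 1;  // r0 repeats, so this returns 0
    }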
+ void CompareMap(Register obj_map, + Handle<Map> map, + Label* early_success, + CompareMapMode mode = REQUIRE_EXACT_MAP); + // Check if the map of an object is equal to a specified map and branch to // label if not. Skip the smi check if not required (object is known to be a // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match @@ -1321,7 +1336,6 @@ class MacroAssembler: public Assembler { }; -#ifdef ENABLE_DEBUGGER_SUPPORT // The code patcher is used to patch (typically) small parts of code e.g. for // debugging and other types of instrumentation. When using the code patcher // the exact number of bytes specified must be emitted. It is not legal to emit @@ -1351,7 +1365,6 @@ class CodePatcher { int size_; // Number of bytes of the expected patch size. MacroAssembler masm_; // Macro assembler used to generate the code. }; -#endif // ENABLE_DEBUGGER_SUPPORT // ----------------------------------------------------------------------------- diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 10ff2dd96c..66cdd8435e 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -43,45 +43,49 @@ namespace internal { #ifndef V8_INTERPRETED_REGEXP /* * This assembler uses the following register assignment convention + * - r4 : Temporarily stores the index of capture start after a matching pass + * for a global regexp. * - r5 : Pointer to current code object (Code*) including heap object tag. * - r6 : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character offset! * - r7 : Currently loaded character. Must be loaded using * LoadCurrentCharacter before using any of the dispatch methods. - * - r8 : points to tip of backtrack stack + * - r8 : Points to tip of backtrack stack * - r9 : Unused, might be used by C code and expected unchanged. * - r10 : End of input (points to byte after last character in input). * - r11 : Frame pointer. Used to access arguments, local variables and * RegExp registers. * - r12 : IP register, used by assembler. Very volatile. - * - r13/sp : points to tip of C stack. + * - r13/sp : Points to tip of C stack. * * The remaining registers are free for computations. * Each call to a public method should retain this convention. * * The stack will have the following structure: - * - fp[52] Isolate* isolate (Address of the current isolate) - * - fp[48] direct_call (if 1, direct call from JavaScript code, - * if 0, call through the runtime system). - * - fp[44] stack_area_base (High end of the memory area to use as - * backtracking stack). + * - fp[56] Isolate* isolate (address of the current isolate) + * - fp[52] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[48] stack_area_base (high end of the memory area to use as + * backtracking stack). + * - fp[44] capture array size (may fit multiple sets of matches) * - fp[40] int* capture_array (int[num_saved_registers_], for output). * - fp[36] secondary link/return address used by native call. * --- sp when called --- - * - fp[32] return address (lr). - * - fp[28] old frame pointer (r11). 
+ * - fp[32] return address (lr). + * - fp[28] old frame pointer (r11). * - fp[0..24] backup of registers r4..r10. * --- frame pointer ---- - * - fp[-4] end of input (Address of end of string). - * - fp[-8] start of input (Address of first character in string). + * - fp[-4] end of input (address of end of string). + * - fp[-8] start of input (address of first character in string). * - fp[-12] start index (character index of start). * - fp[-16] void* input_string (location of a handle containing the string). - * - fp[-20] Offset of location before start of input (effectively character + * - fp[-20] success counter (only for global regexps to count matches). + * - fp[-24] Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a * non-position. - * - fp[-24] At start (if 1, we are starting at the start of the + * - fp[-28] At start (if 1, we are starting at the start of the * string, otherwise 0) - * - fp[-28] register 0 (Only positions must be stored in the first + * - fp[-32] register 0 (Only positions must be stored in the first * - register 1 num_saved_registers_ registers) * - ... * - register num_registers-1 @@ -115,8 +119,10 @@ namespace internal { RegExpMacroAssemblerARM::RegExpMacroAssemblerARM( Mode mode, - int registers_to_save) - : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), + int registers_to_save, + Zone* zone) + : NativeRegExpMacroAssembler(zone), + masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), mode_(mode), num_registers_(registers_to_save), num_saved_registers_(registers_to_save), @@ -197,9 +203,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, &not_at_start); + BranchOrBacktrack(ne, &not_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); @@ -212,9 +218,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, on_not_at_start); + BranchOrBacktrack(ne, on_not_at_start); // If we did, are we still at the start of the input? 
__ ldr(r1, MemOperand(frame_pointer(), kInputStart)); __ add(r0, end_of_input_address(), Operand(current_input_offset())); @@ -432,16 +438,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference( } -void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1, - int reg2, - Label* on_not_equal) { - __ ldr(r0, register_location(reg1)); - __ ldr(r1, register_location(reg2)); - __ cmp(r0, r1); - BranchOrBacktrack(ne, on_not_equal); -} - - void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c, Label* on_not_equal) { __ cmp(current_character(), Operand(c)); @@ -452,8 +448,12 @@ void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c, void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c, uint32_t mask, Label* on_equal) { - __ and_(r0, current_character(), Operand(mask)); - __ cmp(r0, Operand(c)); + if (c == 0) { + __ tst(current_character(), Operand(mask)); + } else { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + } BranchOrBacktrack(eq, on_equal); } @@ -461,8 +461,12 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c, void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c, unsigned mask, Label* on_not_equal) { - __ and_(r0, current_character(), Operand(mask)); - __ cmp(r0, Operand(c)); + if (c == 0) { + __ tst(current_character(), Operand(mask)); + } else { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + } BranchOrBacktrack(ne, on_not_equal); } @@ -480,6 +484,44 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd( } +void RegExpMacroAssemblerARM::CheckCharacterInRange( + uc16 from, + uc16 to, + Label* on_in_range) { + __ sub(r0, current_character(), Operand(from)); + __ cmp(r0, Operand(to - from)); + BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition. +} + + +void RegExpMacroAssemblerARM::CheckCharacterNotInRange( + uc16 from, + uc16 to, + Label* on_not_in_range) { + __ sub(r0, current_character(), Operand(from)); + __ cmp(r0, Operand(to - from)); + BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition. +} + + +void RegExpMacroAssemblerARM::CheckBitInTable( + Handle<ByteArray> table, + Label* on_bit_set) { + __ mov(r0, Operand(table)); + if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + __ and_(r1, current_character(), Operand(kTableSize - 1)); + __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag)); + } else { + __ add(r1, + current_character(), + Operand(ByteArray::kHeaderSize - kHeapObjectTag)); + } + __ ldrb(r0, MemOperand(r0, r1)); + __ cmp(r0, Operand(0)); + BranchOrBacktrack(ne, on_bit_set); +} + + bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, Label* on_no_match) { // Range checks (c in min..max) are generally implemented by an unsigned @@ -609,6 +651,7 @@ void RegExpMacroAssemblerARM::Fail() { Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { + Label return_r0; // Finalize code - write the entry point code now we know how many // registers we need. @@ -632,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); + __ mov(r0, Operand(0, RelocInfo::NONE)); + __ push(r0); // Make room for success counter and initialize it to 0. __ push(r0); // Make room for "position - 1" constant (value is irrelevant). - __ push(r0); // Make room for "at start" constant (value is irrelevant). 
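The range helpers added above compress the two-sided test from <= c && c <= to into one unsigned comparison: c - from wraps to a huge unsigned value when c < from, so a single lower-or-same (ls) check covers both bounds, and hi is its exact negation. Likewise, the new c == 0 fast path in CheckCharacterAfterAnd/CheckNotCharacterAfterAnd replaces an and_ plus cmp pair with a single tst, which sets the flags without a scratch register. A minimal standalone C++ sketch of both tricks (helper names invented for illustration, not V8 API):

  #include <cstdint>

  // One unsigned compare replaces both bound checks: if c < from, the
  // subtraction wraps around and the <= test fails automatically.
  inline bool InRange(uint16_t c, uint16_t from, uint16_t to) {
    return static_cast<uint32_t>(c - from) <= static_cast<uint32_t>(to - from);
  }

  // (ch & mask) == 0 needs no result register on ARM: tst ANDs the
  // operands and sets the flags directly.
  inline bool MaskedIsZero(uint32_t ch, uint32_t mask) {
    return (ch & mask) == 0;
  }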
// Check if we have space on the stack for registers. Label stack_limit_hit; Label stack_ok; @@ -652,13 +696,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. __ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); __ bind(&stack_limit_hit); CallCheckStackGuardState(r0); __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returned value is non-zero, we exit with the returned value as result. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); __ bind(&stack_ok); @@ -679,41 +723,45 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // position registers. __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); - // Determine whether the start index is zero, that is at the start of the - // string, and store that value in a local variable. - __ cmp(r1, Operand(0)); - __ mov(r1, Operand(1), LeaveCC, eq); - __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne); - __ str(r1, MemOperand(frame_pointer(), kAtStart)); + // Initialize code pointer register + __ mov(code_pointer(), Operand(masm_->CodeObject())); + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(ne, &load_char_start_regexp); + __ mov(current_character(), Operand('\n'), LeaveCC, eq); + __ jmp(&start_regexp); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. // Fill saved registers with initial value = start offset - 1 - - // Address of register 0. - __ add(r1, frame_pointer(), Operand(kRegisterZero)); - __ mov(r2, Operand(num_saved_registers_)); - Label init_loop; - __ bind(&init_loop); - __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); - __ sub(r2, r2, Operand(1), SetCC); - __ b(ne, &init_loop); + if (num_saved_registers_ > 8) { + // Address of register 0. + __ add(r1, frame_pointer(), Operand(kRegisterZero)); + __ mov(r2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); + __ sub(r2, r2, Operand(1), SetCC); + __ b(ne, &init_loop); + } else { + for (int i = 0; i < num_saved_registers_; i++) { + __ str(r0, register_location(i)); + } + } } // Initialize backtrack stack pointer. __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); - // Initialize code pointer register - __ mov(code_pointer(), Operand(masm_->CodeObject())); - // Load previous char as initial value of current character register. - Label at_start; - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(ne, &at_start); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. 
- __ jmp(&start_label_); - __ bind(&at_start); - __ mov(current_character(), Operand('\n')); - __ jmp(&start_label_); + __ jmp(&start_label_); // Exit code: if (success_label_.is_linked()) { @@ -740,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { for (int i = 0; i < num_saved_registers_; i += 2) { __ ldr(r2, register_location(i)); __ ldr(r3, register_location(i + 1)); + if (i == 0 && global_with_zero_length_check()) { + // Keep capture start in r4 for the zero-length check later. + __ mov(r4, r2); + } if (mode_ == UC16) { __ add(r2, r1, Operand(r2, ASR, 1)); __ add(r3, r1, Operand(r3, ASR, 1)); @@ -751,10 +803,58 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); } } - __ mov(r0, Operand(SUCCESS)); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput)); + // Increment success counter. + __ add(r0, r0, Operand(1)); + __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ sub(r1, r1, Operand(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmp(r1, Operand(num_saved_registers_)); + __ b(lt, &return_r0); + + __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + // Advance the location for output. + __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize)); + __ str(r2, MemOperand(frame_pointer(), kRegisterOutput)); + + // Prepare r0 to initialize registers with its value in the next run. + __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); + + if (global_with_zero_length_check()) { + // Special case for zero-length matches. + // r4: capture start index + __ cmp(current_input_offset(), r4); + // Not a zero-length match, restart. + __ b(ne, &load_char_start_regexp); + // Offset from the end is zero if we already reached the end. + __ cmp(current_input_offset(), Operand(0)); + __ b(eq, &exit_label_); + // Advance current position after a zero-length match. + __ add(current_input_offset(), + current_input_offset(), + Operand((mode_ == UC16) ? 2 : 1)); + } + + __ b(&load_char_start_regexp); + } else { + __ mov(r0, Operand(SUCCESS)); + } } + // Exit and return r0. __ bind(&exit_label_); + if (global()) { + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + } + + __ bind(&return_r0); // Skip sp past regexp registers and local variables. __ mov(sp, frame_pointer()); // Restore registers r4..r11 and return (restoring lr to pc). @@ -776,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returning non-zero, we should end execution with the given // result as return value. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); // String might have moved: Reload end of string from frame. __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); @@ -813,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception.
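The success path above is the core of the new global-regexp support: each match bumps the kSuccessfulCaptures counter, deducts num_saved_registers_ from the remaining output budget in kNumOutputRegisters, advances kRegisterOutput, and jumps back to load_char_start_regexp; after a zero-length match the position is pushed forward by one character (two bytes in UC16 mode) so the restart loop is guaranteed to terminate. A hedged C++ restatement of that control flow (names invented; the real loop is the emitted ARM code above):

  #include <functional>

  int MatchGlobal(const std::function<bool(int pos, int* captures)>& match_once,
                  int* output, int output_slots, int num_saved_registers,
                  int input_length) {
    int successful_captures = 0;
    int pos = 0;
    while (output_slots >= num_saved_registers) {
      if (!match_once(pos, output)) break;    // No further match.
      successful_captures++;                  // kSuccessfulCaptures
      int match_start = output[0];
      int match_end = output[1];
      output += num_saved_registers;          // Advance kRegisterOutput.
      output_slots -= num_saved_registers;    // Shrink kNumOutputRegisters.
      pos = match_end;
      if (match_end == match_start) {         // Zero-length match.
        if (pos == input_length) break;       // Already at end of input.
        pos++;                                // Force progress, then restart.
      }
    }
    return successful_captures;               // What return_r0 reports.
  }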
__ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); } CodeDesc code_desc; @@ -968,8 +1068,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerARM::Succeed() { +bool RegExpMacroAssemblerARM::Succeed() { __ jmp(&success_label_); + return global(); } @@ -1261,8 +1362,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); if (cp_offset != 0) { - __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); - offset = r0; + // r4 is not being used to store the capture start index at this point. + __ add(r4, current_input_offset(), Operand(cp_offset * char_size())); + offset = r4; } // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU // and the operating system running on the target allow it. diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 5c8ed0693f..9bebb4d406 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -45,7 +45,7 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler { #else // V8_INTERPRETED_REGEXP class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { public: - RegExpMacroAssemblerARM(Mode mode, int registers_to_save); + RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone); virtual ~RegExpMacroAssemblerARM(); virtual int stack_limit_slack(); virtual void AdvanceCurrentPosition(int by); @@ -70,7 +70,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void CheckNotBackReference(int start_reg, Label* on_no_match); virtual void CheckNotBackReferenceIgnoreCase(int start_reg, Label* on_no_match); - virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal); virtual void CheckNotCharacter(unsigned c, Label* on_not_equal); virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask, @@ -79,6 +78,14 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { uc16 minus, uc16 mask, Label* on_not_equal); + virtual void CheckCharacterInRange(uc16 from, + uc16 to, + Label* on_in_range); + virtual void CheckCharacterNotInRange(uc16 from, + uc16 to, + Label* on_not_in_range); + virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set); + // Checks whether the given offset from the current position is before // the end of the string. virtual void CheckPosition(int cp_offset, Label* on_outside_input); @@ -105,7 +112,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -129,7 +136,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; // Stack parameters placed by caller. 
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; @@ -141,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kInputString = kStartIndex - kPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. - static const int kInputStartMinusOne = kInputString - kPointerSize; - static const int kAtStart = kInputStartMinusOne - kPointerSize; + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kAtStart - kPointerSize; + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; // Initial size of code buffer. static const size_t kRegExpCodeSize = 1024; diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 585f1e0176..d1cad15bd0 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,16 +49,16 @@ namespace internal { (entry(p0, p1, p2, p3, p4)) typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, - void*, int*, Address, int, Isolate*); + void*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address // should act as a function matching the type arm_regexp_matcher. // The fifth argument is a dummy that reserves the space used for // the return address added by the ExitFrame in native calls. -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ (FUNCTION_CAST<arm_regexp_matcher>(entry)( \ - p0, p1, p2, p3, NULL, p4, p5, p6, p7)) + p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ reinterpret_cast<TryCatch*>(try_catch_address) @@ -401,9 +401,9 @@ class Simulator { reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ Simulator::current(Isolate::Current())->Call( \ - entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7) + entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == NULL ? 
\ diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index d514b607ae..dd9de23fa4 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -435,22 +435,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle<JSObject> object, int index, Handle<Map> transition, + Handle<String> name, Register receiver_reg, Register name_reg, - Register scratch, + Register scratch1, + Register scratch2, Label* miss_label) { // r0 : value Label exit; + LookupResult lookup(masm->isolate()); + object->Lookup(*name, &lookup); + if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) { + // In sloppy mode, we could just return the value and be done. However, we + // might be in strict mode, where we have to throw. Since we cannot tell, + // go into slow case unconditionally. + __ jmp(miss_label); + return; + } + // Check that the map of the object hasn't changed. CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS : REQUIRE_EXACT_MAP; __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, DO_SMI_CHECK, mode); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label); + __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); + } + + // Check that we are allowed to write this. + if (!transition.is_null() && object->GetPrototype()->IsJSObject()) { + JSObject* holder; + if (lookup.IsFound()) { + holder = lookup.holder(); + } else { + // Find the top object. + holder = *object; + do { + holder = JSObject::cast(holder->GetPrototype()); + } while (holder->GetPrototype()->IsJSObject()); + } + // We need an extra register; save name_reg so it can be reused as a scratch. + __ push(name_reg); + Label miss_pop, done_check; + CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg, + scratch1, scratch2, name, &miss_pop); + __ jmp(&done_check); + __ bind(&miss_pop); + __ pop(name_reg); + __ jmp(miss_label); + __ bind(&done_check); + __ pop(name_reg); } // Stub never generated for non-global objects that require access @@ -473,10 +510,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } if (!transition.is_null()) { - // Update the map of the object; no write barrier updating is - // needed because the map is never in new space. - __ mov(ip, Operand(transition)); - __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + // Update the map of the object. + __ mov(scratch1, Operand(transition)); + __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + + // Update the write barrier for the map field and pass the now unused + // name_reg as scratch register. + __ RecordWriteField(receiver_reg, + HeapObject::kMapOffset, + scratch1, + name_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } // Adjust for the number of properties stored in the object. Even in the @@ -498,15 +545,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ RecordWriteField(receiver_reg, offset, name_reg, - scratch, + scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); } else { // Write to the properties array.
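The hunk above also changes what GenerateStoreField does before emitting any code: it performs a LookupResult query up front, and read-only or non-cacheable properties unconditionally take the miss path, since only the runtime knows whether the caller's language mode requires a silent no-op (sloppy) or a throw (strict). A C++ sketch of just that decision, with invented types standing in for V8's LookupResult:

  struct LookupInfo {
    bool found;
    bool read_only;
    bool cacheable;
  };

  // Returns true when the stub may generate the fast store path.
  bool MayGenerateFastStore(const LookupInfo& lookup) {
    if (lookup.found && (lookup.read_only || !lookup.cacheable)) {
      // Sloppy mode could silently succeed, strict mode must throw; the
      // stub cannot tell which applies, so it always misses to the runtime.
      return false;
    }
    return true;
  }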
int offset = index * kPointerSize + FixedArray::kHeaderSize; // Get the properties array - __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ str(r0, FieldMemOperand(scratch, offset)); + __ ldr(scratch1, + FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); + __ str(r0, FieldMemOperand(scratch1, offset)); // Skip updating write barrier if storing a smi. __ JumpIfSmi(r0, &exit); @@ -514,7 +562,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. __ mov(name_reg, r0); - __ RecordWriteField(scratch, + __ RecordWriteField(scratch1, offset, name_reg, receiver_reg, @@ -582,6 +630,8 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(holder); __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ push(scratch); + __ mov(scratch, Operand(ExternalReference::isolate_address())); + __ push(scratch); } @@ -596,7 +646,7 @@ static void CompileCallLoadPropertyWithInterceptor( ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), masm->isolate()); - __ mov(r0, Operand(5)); + __ mov(r0, Operand(6)); __ mov(r1, Operand(ref)); CEntryStub stub(1); @@ -604,9 +654,9 @@ static void CompileCallLoadPropertyWithInterceptor( } -static const int kFastApiCallArguments = 3; +static const int kFastApiCallArguments = 4; -// Reserves space for the extra arguments to FastHandleApiCall in the +// Reserves space for the extra arguments to API function in the // caller's frame. // // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. @@ -632,7 +682,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee JS function // -- sp[8] : call data - // -- sp[12] : last JS argument + // -- sp[12] : isolate + // -- sp[16] : last JS argument // -- ... // -- sp[(argc + 3) * 4] : first JS argument // -- sp[(argc + 4) * 4] : receiver @@ -642,7 +693,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ LoadHeapObject(r5, function); __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); - // Pass the additional arguments FastHandleApiCall expects. + // Pass the additional arguments. Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<Object> call_data(api_call_info->data()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { @@ -651,13 +702,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ Move(r6, call_data); } - // Store JS function and call data. - __ stm(ib, sp, r5.bit() | r6.bit()); + __ mov(r7, Operand(ExternalReference::isolate_address())); + // Store JS function, call data and isolate. + __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); - // r2 points to call data as expected by Arguments - // (refer to layout above). - __ add(r2, sp, Operand(2 * kPointerSize)); + // Prepare arguments. + __ add(r2, sp, Operand(3 * kPointerSize)); + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. const int kApiStackSpace = 4; FrameScope frame_scope(masm, StackFrame::MANUAL); @@ -666,9 +719,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // r0 = v8::Arguments& // Arguments is after the return address. 
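The kApiStackSpace slots reserved just above hold a hand-built v8::Arguments. Judging from the stores that follow, the object is four pointer-sized words; the struct below is an illustrative mock-up of that layout, not a copy of the real v8 header (the fourth word is an assumption based on the reserved space):

  struct ArgumentsLayout {
    void** implicit_args;    // Points at the holder/callee/call data/isolate block.
    void** values;           // Points at the last JS argument; args grow downwards.
    int length;              // argc.
    int is_construct_call;   // Assumed use of the fourth reserved word.
  };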
__ add(r0, sp, Operand(1 * kPointerSize)); - // v8::Arguments::implicit_args = data + // v8::Arguments::implicit_args_ __ str(r2, MemOperand(r0, 0 * kPointerSize)); - // v8::Arguments::values = last argument + // v8::Arguments::values_ __ add(ip, r2, Operand(argc * kPointerSize)); __ str(ip, MemOperand(r0, 1 * kPointerSize)); // v8::Arguments::length_ = argc @@ -845,7 +898,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { __ CallExternalReference( ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), masm->isolate()), - 5); + 6); // Restore the name_ register. __ pop(name_); // Leave the internal frame. @@ -1204,7 +1257,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } else { __ Move(scratch3, Handle<Object>(callback->data())); } - __ Push(reg, scratch3, name_reg); + __ Push(reg, scratch3); + __ mov(scratch3, Operand(ExternalReference::isolate_address())); + __ Push(scratch3, name_reg); __ mov(r0, sp); // r0 = Handle<String> const int kApiStackSpace = 1; @@ -1216,7 +1271,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - const int kStackUnwindSpace = 4; + const int kStackUnwindSpace = 5; Address getter_address = v8::ToCData<Address>(callback->getter()); ApiFunction fun(getter_address); ExternalReference ref = @@ -1252,8 +1307,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsAccessorInfo()) { - compile_followup_inline = - AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL; + AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + compile_followup_inline = callback->getter() != NULL && + callback->IsCompatibleReceiver(*object); } } @@ -1344,20 +1400,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (!receiver.is(holder_reg)) { ASSERT(scratch1.is(holder_reg)); __ Push(receiver, holder_reg); - __ ldr(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ Push(scratch3, scratch2, name_reg); } else { __ push(receiver); - __ ldr(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ Push(holder_reg, scratch3, scratch2, name_reg); + __ push(holder_reg); } + __ ldr(scratch3, + FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); + __ mov(scratch1, Operand(ExternalReference::isolate_address())); + __ Push(scratch3, scratch1, scratch2, name_reg); ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadCallbackProperty), masm()->isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } else { // !compile_followup_inline // Call the runtime system to load the interceptor. @@ -1371,7 +1426,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } @@ -1575,16 +1630,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. 
__ bind(¬_fast_object); - __ CheckFastSmiOnlyElements(r3, r7, &call_builtin); + __ CheckFastSmiElements(r3, r7, &call_builtin); // edx: receiver // r3: map - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r3, r7, + &try_holey_map); + __ mov(r2, receiver); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + r3, + r7, &call_builtin); __ mov(r2, receiver); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); __ bind(&fast_object); } else { __ CheckFastObjectElements(r3, r3, &call_builtin); @@ -1739,7 +1807,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( // We can't address the last element in one operation. Compute the more // expensive shift first, and use an offset later on. __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); __ cmp(r0, r6); __ b(eq, &call_builtin); @@ -1747,7 +1815,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Fill with the hole. - __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); + __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize)); __ Drop(argc + 1); __ Ret(); @@ -2539,7 +2607,13 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, // ----------------------------------- Label miss; - GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss); + GenerateStoreField(masm(), + object, + index, + transition, + name, + r1, r2, r3, r4, + &miss); __ bind(&miss); Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -2594,6 +2668,51 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( } +Handle<Code> StoreStubCompiler::CompileStoreViaSetter( + Handle<JSObject> receiver, + Handle<JSFunction> setter, + Handle<String> name) { + // ----------- S t a t e ------------- + // -- r0 : value + // -- r1 : receiver + // -- r2 : name + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the map of the object hasn't changed. + __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + + { + FrameScope scope(masm(), StackFrame::INTERNAL); + + // Save value register, so we can restore it later. + __ push(r0); + + // Call the JavaScript getter with the receiver and the value on the stack. + __ Push(r1, r0); + ParameterCount actual(1); + __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); + + // We have to return the passed value, not the return value of the setter. + __ pop(r0); + + // Restore context register. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); + + __ bind(&miss); + Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. 
+ return GetCode(CALLBACKS, name); +} + + Handle<Code> StoreStubCompiler::CompileStoreInterceptor( Handle<JSObject> receiver, Handle<String> name) { @@ -2761,6 +2880,44 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback( } +Handle<Code> LoadStubCompiler::CompileLoadViaGetter( + Handle<String> name, + Handle<JSObject> receiver, + Handle<JSObject> holder, + Handle<JSFunction> getter) { + // ----------- S t a t e ------------- + // -- r0 : receiver + // -- r2 : name + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the maps haven't changed. + __ JumpIfSmi(r0, &miss); + CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss); + + { + FrameScope scope(masm(), StackFrame::INTERNAL); + + // Call the JavaScript getter with the receiver on the stack. + __ push(r0); + ParameterCount actual(0); + __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); + + // Restore context register. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); + + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> value, @@ -3085,7 +3242,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, // r3 is used as scratch register. r1 and r2 keep their values if a jump to // the miss label is generated. - GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss); + GenerateStoreField(masm(), + object, + index, + transition, + name, + r2, r1, r3, r4, + &miss); __ bind(&miss); __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4); @@ -3366,8 +3529,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3377,6 +3543,44 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { } +static void GenerateSmiKeyCheck(MacroAssembler* masm, + Register key, + Register scratch0, + Register scratch1, + DwVfpRegister double_scratch0, + Label* fail) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + Label key_ok; + // Check for smi or a smi inside a heap number. We convert the heap + // number and check if the conversion is exact and fits into the smi + // range. + __ JumpIfSmi(key, &key_ok); + __ CheckMap(key, + scratch0, + Heap::kHeapNumberMapRootIndex, + fail, + DONT_DO_SMI_CHECK); + __ sub(ip, key, Operand(kHeapObjectTag)); + __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); + __ EmitVFPTruncate(kRoundToZero, + double_scratch0.low(), + double_scratch0, + scratch0, + scratch1, + kCheckForInexactConversion); + __ b(ne, fail); + __ vmov(scratch0, double_scratch0.low()); + __ TrySmiTag(scratch0, fail, scratch1); + __ mov(key, scratch0); + __ bind(&key_ok); + } else { + // Check that the key is a smi. 
+ __ JumpIfNotSmi(key, fail); + } +} + + void KeyedLoadStubCompiler::GenerateLoadExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -3393,8 +3597,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); // r3: elements array @@ -3453,8 +3657,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3724,8 +3931,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -3794,8 +4001,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3858,8 +4068,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3998,8 +4211,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4050,8 +4266,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(r0, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic); // Get the elements array. __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); @@ -4102,8 +4318,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. 
- __ JumpIfNotSmi(key_reg, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); // Get the elements array. __ ldr(elements_reg, @@ -4178,10 +4394,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key_reg, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); } @@ -4209,7 +4425,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( DONT_DO_SMI_CHECK); __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4219,7 +4435,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); __ str(value_reg, MemOperand(scratch)); } else { - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4345,7 +4561,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - __ JumpIfNotSmi(key_reg, &miss_force_generic); + + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
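Every keyed stub in this file now funnels its key through GenerateSmiKeyCheck, so a heap-number key such as 2.0 no longer forces the generic path: with VFP3 available, the stub truncates the double (kRoundToZero), rejects inexact conversions (kCheckForInexactConversion), and re-tags the result as a smi. The acceptance test it implements, restated as a self-contained C++ sketch (helper name invented):

  #include <cmath>
  #include <cstdint>

  // True when |value| is usable as a fast-elements key: it must convert
  // to an integer exactly and fit the 31-bit smi range of 32-bit ARM.
  bool KeyToSmi(double value, int32_t* out) {
    double truncated = std::trunc(value);
    if (truncated != value) return false;   // Inexact (or NaN): reject.
    if (truncated < -1073741824.0 || truncated > 1073741823.0)
      return false;                         // Outside the smi range.
    *out = static_cast<int32_t>(truncated);
    return true;
  }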