Diffstat (limited to 'deps/v8/src/builtins/arm/builtins-arm.cc')
-rw-r--r--  deps/v8/src/builtins/arm/builtins-arm.cc  577
1 file changed, 311 insertions(+), 266 deletions(-)
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 05593bd419..286df2eea7 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -446,11 +446,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -460,8 +456,6 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -469,189 +463,250 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Preserve the incoming parameters on the stack.
__ SmiTag(r0);
__ Push(cp, r0);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(r1, r3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(r4, r0);
- __ Pop(r1, r3);
-
- // ----------- S t a t e -------------
- // -- r1: constructor function
- // -- r3: new target
- // -- r4: newly allocated object
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ ldr(r0, MemOperand(sp));
- }
-
__ SmiUntag(r0);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(r4);
- __ push(r4);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r3: new target
- // r4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(r4, r0);
+ __ mov(r5, r0);
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r1: constructor function
+ // -- r3: new target
+ // -- r4: pointer to last argument
+ // -- r5: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
__ bind(&entry);
- __ sub(r4, r4, Operand(2), SetCC);
+ __ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
// Call the function.
- // r0: number of arguments
+ // r0: number of arguments (untagged)
// r1: constructor function
// r3: new target
ParameterCount actual(r0);
__ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ ldr(r1, MemOperand(sp));
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(r0, &do_throw);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r3, r3, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &dont_throw);
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
- }
__ Jump(lr);
+}
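
A note on the arithmetic just above: because a smi stores its value shifted left by kSmiTagSize, shifting the tagged count left by kPointerSizeLog2 - kSmiTagSize strips the tag and scales to a byte offset in a single instruction; the extra kPointerSize pops the receiver slot. A minimal standalone C++ sketch of that identity, assuming constants that mirror the 32-bit ARM build (none of this is V8 API):

#include <cassert>
#include <cstdint>

// Assumed constants for illustration: smis tagged in the low bit,
// 4-byte pointers, as on ARM32.
constexpr int kSmiTagSize = 1;
constexpr int kPointerSizeLog2 = 2;

uint32_t SmiTag(uint32_t value) { return value << kSmiTagSize; }

// Bytes popped by the two `add sp, sp, ...` instructions above.
uint32_t BytesToPop(uint32_t smi_tagged_argc) {
  // One shift both untags and scales: (argc << 1) << (2 - 1) == argc << 2.
  return (smi_tagged_argc << (kPointerSizeLog2 - kSmiTagSize)) +
         (1u << kPointerSizeLog2);  // plus one slot for the receiver
}

int main() {
  assert(BytesToPop(SmiTag(3)) == (3 + 1) * 4);  // 3 arguments + receiver
  return 0;
}
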
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r1: constructor function
+ // -- r3: new target
+ // -- cp: context
+ // -- lr: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(r0);
+ __ Push(cp, r0, r1, r3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- r1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ b(ne, &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ r4, r5);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as the receiver for the constructor call.
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(r3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r0, r0);
// ----------- S t a t e -------------
- // -- r0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- r3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ pop(r1);
- __ push(r0);
- __ push(r0);
-
- // Retrieve smi-tagged arguments count from the stack.
+ // Restore constructor function and argument count.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+ // Set up pointer to last argument.
+ __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Continue with constructor function invocation.
- __ b(&post_instantiation_deopt_entry);
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(r5, r0);
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r3: new target
+ // -- r4: pointer to last argument
+ // -- r5: counter
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r5, r5, Operand(1), SetCC);
+ __ b(ge, &loop);
+
+ // Call the function.
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &leave_frame);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if the constructor function is a class constructor.
+ __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ b(eq, &use_receiver);
+
+ } else {
+ __ b(&use_receiver);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(r0, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
}
-
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
// static
@@ -830,7 +885,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
+ // r5-r6, r8 and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter an internal frame.
@@ -880,9 +935,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- if (!FLAG_enable_embedded_constant_pool) {
- __ mov(r8, Operand(r4));
- }
+ __ mov(r8, Operand(r4));
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -910,6 +963,38 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ add(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ ldr(native_context, NativeContextMemOperand());
+ __ ldr(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ str(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, scratch2);
+}
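
In plain C++ terms, the helper performs a store plus a head insertion into an intrusive linked list. A hedged sketch with illustrative types (the real stub additionally emits GC write barriers, which this omits):

struct Code {};
struct Closure {
  Code* code_entry = nullptr;
  Closure* next_function_link = nullptr;
};
struct NativeContext {
  Closure* optimized_functions_list = nullptr;
};

// Mirrors ReplaceClosureEntryWithOptimizedCode: point the closure at the
// optimized code, then link it at the head of the context's list.
void InstallOptimizedCode(NativeContext* context, Closure* closure,
                          Code* optimized_code) {
  closure->code_entry = optimized_code;  // self-heal the closure
  closure->next_function_link = context->optimized_functions_list;
  context->optimized_functions_list = closure;  // head insertion
}
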
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -935,7 +1020,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// o r1: the JS function object being called.
// o r3: the new target
// o cp: our context
-// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -951,6 +1035,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r1);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = r4;
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+ __ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
+ __ ldr(
+ optimized_code_entry,
+ FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -1063,6 +1161,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(r1, r4, r5);
__ Jump(r4);
+
+ // If there is optimized code in the type feedback vector, check whether it
+ // is safe to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ ldr(r5, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+
+ __ b(ne, &gotta_call_runtime);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
+ r2);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+ // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
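
The deopt check above is a single bit test on the code object's flag word. As a sketch, with the bit position assumed purely for illustration:

#include <cstdint>

constexpr int kMarkedForDeoptimizationBit = 0;  // assumed position, illustrative

// tst r5, #(1 << bit) ANDs and sets the flags; the stub takes the ne branch
// to the runtime bailout exactly when the bit is set.
bool MarkedForDeoptimization(uint32_t kind_specific_flags1) {
  return (kind_specific_flags1 & (1u << kMarkedForDeoptimizationBit)) != 0;
}
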
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1300,114 +1422,50 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- r1 : target function (preserved for callee)
// -----------------------------------
// First, look up code; maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
- Register argument_count = r0;
Register closure = r1;
- Register new_target = r3;
- Register map = argument_count;
Register index = r2;
// Do we have a valid feedback vector?
__ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(map,
- FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ cmp(index, Operand(Smi::FromInt(2)));
- __ b(lt, &try_shared);
-
- // r3 : native context
- // r2 : length / index
- // r0 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = r3;
- __ ldr(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = r1;
- Register array_pointer = r5;
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- // Does the native context match?
- __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
- __ ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ b(ne, &loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = r4;
- __ ldr(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ ldr(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, r5);
+ // Found code. Check whether it is marked for deopt; if so, call into the
+ // runtime to clear the optimized code slot.
+ __ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ b(ne, &gotta_call_runtime);
- // Link the closure into the optimized function list.
- // r4 : code entry
- // r3 : native context
- // r1 : closure
- __ ldr(r5,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ str(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(r5, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, r5);
- __ pop(new_target);
- __ pop(argument_count);
+ // Code is good, get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
__ Jump(entry);
- __ bind(&loop_bottom);
- __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(index, Operand(Smi::FromInt(1)));
- __ b(gt, &loop_top);
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ ldrb(r5, FieldMemOperand(entry,
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ b(ne, &gotta_call_runtime_no_stack);
+ __ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(r5, masm->CodeObject());
__ cmp(entry, r5);
- __ b(eq, &gotta_call_runtime_no_stack);
+ __ b(eq, &gotta_call_runtime);
// Install the SFI's code entry.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1416,10 +1474,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Jump(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
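
The rewritten Generate_CompileLazy is now a short decision ladder rather than a loop over the old optimized code map. A hedged C++ summary of the branches, with illustrative names standing in for the assembly labels:

enum class Action { kJumpToOptimizedCode, kJumpToSharedCode, kCallRuntime };

// Condensed from the labels above: gotta_call_runtime, try_shared, and the
// direct jumps. All predicates are assumed inputs for illustration.
Action CompileLazyDecision(bool has_feedback_vector, bool has_optimized_code,
                           bool marked_for_deopt, bool marked_for_tier_up,
                           bool shared_code_is_compile_lazy) {
  if (!has_feedback_vector) return Action::kCallRuntime;
  if (has_optimized_code) {
    return marked_for_deopt ? Action::kCallRuntime
                            : Action::kJumpToOptimizedCode;
  }
  if (marked_for_tier_up) return Action::kCallRuntime;
  if (shared_code_is_compile_lazy) return Action::kCallRuntime;
  return Action::kJumpToSharedCode;  // install the SFI's code entry and jump
}
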
@@ -1676,10 +1730,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
- }
-
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ldr(r1, FieldMemOperand(
@@ -1937,7 +1987,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
- (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
@@ -2104,54 +2153,54 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r1 : the target to call (can be any Object)
- // -- r2 : start index (to support rest parameters)
- // -- lr : return address.
- // -- sp[0] : thisArgument
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r3 : the new.target (for [[Construct]] calls)
+ // -- r1 : the target to call (can be any Object)
+ // -- r2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(
- r0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r3, fp);
+ __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r5, FieldMemOperand(
+ r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r4, fp);
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
// Load the length from the ArgumentsAdaptorFrame.
- __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ SmiUntag(r0);
- __ sub(r0, r0, r2, SetCC);
- __ b(le, &stack_empty);
+ Label stack_done, stack_overflow;
+ __ SmiUntag(r5);
+ __ sub(r5, r5, r2, SetCC);
+ __ b(le, &stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r0, r2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r5, r2, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ add(r3, r3, Operand(kPointerSize));
- __ mov(r2, r0);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ add(r0, r0, r5);
__ bind(&loop);
{
- __ ldr(ip, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
- __ sub(r2, r2, Operand(1), SetCC);
+ __ sub(r5, r5, Operand(1), SetCC);
__ b(ne, &loop);
}
}
@@ -2159,13 +2208,9 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ mov(r0, Operand(0));
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
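
Behaviorally, the new Generate_ForwardVarargs computes how many caller arguments lie past the start index and pushes them onto the current frame, bumping the argument count rather than replacing it. A hedged C++ model with illustrative containers standing in for the machine stack:

#include <cstddef>
#include <vector>

// caller_args models the slots found via fp (or the adaptor frame); stack
// models the expression stack being built. Names are illustrative, not V8's.
void ForwardVarargs(const std::vector<int>& caller_args, size_t start_index,
                    std::vector<int>* stack, size_t* argc) {
  if (caller_args.size() <= start_index) return;  // stack_done: nothing to do
  size_t count = caller_args.size() - start_index;
  *argc += count;  // matches `add(r0, r0, r5)` before the loop
  // The stub walks r5 from count down to 1, pushing the highest slot first.
  for (size_t i = count; i >= 1; --i) {
    stack->push_back(caller_args[start_index + i - 1]);
  }
}
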