author | Michaël Zasso <targos@protonmail.com> | 2018-03-07 08:54:53 +0100
---|---|---
committer | Michaël Zasso <targos@protonmail.com> | 2018-03-07 16:48:52 +0100
commit | 88786fecff336342a56e6f2e7ff3b286be716e47 (patch) |
tree | 92e6ba5b8ac8dae1a058988d20c9d27bfa654390 /deps/v8/src/builtins |
parent | 4e86f9b5ab83cbabf43839385bf383e6a7ef7d19 (diff) |
download | node-new-88786fecff336342a56e6f2e7ff3b286be716e47.tar.gz |
deps: update V8 to 6.5.254.31
PR-URL: https://github.com/nodejs/node/pull/18453
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Yang Guo <yangguo@chromium.org>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Diffstat (limited to 'deps/v8/src/builtins')
38 files changed, 2576 insertions, 1236 deletions
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 1c31009d93..2b2b9c2b34 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -116,9 +116,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Initial map for the builtin InternalArray functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ SmiTst(r2); - __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction); + __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction); + __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); } // Run the native code for the InternalArray function called as a normal @@ -143,9 +143,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) { // Initial map for the builtin Array functions should be maps. __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ SmiTst(r2); - __ Assert(ne, kUnexpectedInitialMapForArrayFunction); + __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction); __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ Assert(eq, kUnexpectedInitialMapForArrayFunction); + __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction); } __ mov(r3, r1); @@ -283,14 +283,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, Label post_instantiation_deopt_entry, not_create_implicit_receiver; // Preserve the incoming parameters on the stack. + __ LoadRoot(r4, Heap::kTheHoleValueRootIndex); __ SmiTag(r0); - __ Push(cp, r0, r1, r3); + __ Push(cp, r0, r1, r4, r3); // ----------- S t a t e ------------- // -- sp[0*kPointerSize]: new target - // -- r1 and sp[1*kPointerSize]: constructor function - // -- sp[2*kPointerSize]: number of arguments (tagged) - // -- sp[3*kPointerSize]: context + // -- sp[1*kPointerSize]: padding + // -- r1 and sp[2*kPointerSize]: constructor function + // -- sp[3*kPointerSize]: number of arguments (tagged) + // -- sp[4*kPointerSize]: context // ----------------------------------- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); @@ -332,9 +334,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // -- r3: new target // -- sp[0*kPointerSize]: implicit receiver // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments (tagged) - // -- sp[4*kPointerSize]: context + // -- sp[2*kPointerSize]: padding + // -- sp[3*kPointerSize]: constructor function + // -- sp[4*kPointerSize]: number of arguments (tagged) + // -- sp[5*kPointerSize]: context // ----------------------------------- // Restore constructor function and argument count. 
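The hunks above show every `__ Assert` / `__ Check` call site moving from bare `BailoutReason` enumerators to a scoped `AbortReason` enum. A minimal sketch of what that buys, using assumed simplified declarations rather than V8's real ones:

```cpp
#include <cstdio>
#include <cstdlib>

// Scoped enum: every use must be qualified, and values no longer
// convert implicitly to int, so reasons cannot be mixed up silently.
enum class AbortReason {
  kUnexpectedInitialMapForArrayFunction,
  kUnexpectedInitialMapForInternalArrayFunction,
  kMissingBytecodeArray,
};

// Hypothetical stand-in for the assembler's Assert(cond, reason).
void Assert(bool condition, AbortReason reason) {
  if (!condition) {
    std::fprintf(stderr, "abort: reason %d\n", static_cast<int>(reason));
    std::abort();
  }
}

void CheckInitialMap(bool map_is_map) {
  // Before: __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  // After: the qualified name is required at the call site.
  Assert(map_is_map, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
```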
@@ -355,9 +358,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // -- r5: counter // -- sp[0*kPointerSize]: implicit receiver // -- sp[1*kPointerSize]: implicit receiver - // -- r1 and sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments (tagged) - // -- sp[4*kPointerSize]: context + // -- sp[2*kPointerSize]: padding + // -- r1 and sp[3*kPointerSize]: constructor function + // -- sp[4*kPointerSize]: number of arguments (tagged) + // -- sp[5*kPointerSize]: context // ----------------------------------- __ b(&entry); @@ -375,9 +379,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // ----------- S t a t e ------------- // -- r0: constructor result // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: constructor function - // -- sp[2*kPointerSize]: number of arguments - // -- sp[3*kPointerSize]: context + // -- sp[1*kPointerSize]: padding + // -- sp[2*kPointerSize]: constructor function + // -- sp[3*kPointerSize]: number of arguments + // -- sp[4*kPointerSize]: context // ----------------------------------- // Store offset of return address for deoptimizer. @@ -541,7 +546,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset)); __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kMissingBytecodeArray); + __ Assert(eq, AbortReason::kMissingBytecodeArray); } // Resume (Ignition/TurboFan) generator object. @@ -629,8 +634,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ mov(cp, Operand(context_address)); __ ldr(cp, MemOperand(cp)); - __ InitializeRootRegister(); - // Push the function and the receiver onto the stack. __ Push(r1, r2); @@ -777,6 +780,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, __ b(eq, &fallthrough); TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, OptimizationMarker::kCompileOptimized, Runtime::kCompileOptimized_NotConcurrent); TailCallRuntimeIfMarkerEquals( @@ -791,7 +797,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, __ cmp( optimized_code_entry, Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, kExpectedOptimizationSentinel); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } __ jmp(&fallthrough); } @@ -871,7 +877,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array, __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); __ add(bytecode_size_table, bytecode_size_table, Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); - __ jmp(&load_size); // Load the size of the current bytecode. __ bind(&load_size); @@ -935,10 +940,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Check function data field is actually a BytecodeArray object. 
if (FLAG_debug_code) { __ SmiTst(kInterpreterBytecodeArrayRegister); - __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } // Reset code age. @@ -1194,10 +1201,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { if (FLAG_debug_code) { // Check function data field is actually a BytecodeArray object. __ SmiTst(kInterpreterBytecodeArrayRegister); - __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } // Get the target bytecode offset from the frame. @@ -1259,7 +1268,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) { // The feedback vector must be defined. if (FLAG_debug_code) { __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex); - __ Assert(ne, BailoutReason::kExpectedFeedbackVector); + __ Assert(ne, AbortReason::kExpectedFeedbackVector); } // Is there an optimization marker or optimized code in the feedback vector? @@ -1799,8 +1808,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR))); __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit()); + __ Push(Smi::kZero); // Padding. __ add(fp, sp, - Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); + Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp)); } static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { @@ -1809,8 +1819,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // ----------------------------------- // Get the number of arguments passed (as a smi), tear down the frame and // then tear down the parameters. - __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + - kPointerSize))); + __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR); __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1)); @@ -1889,7 +1898,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ JumpIfSmi(r3, &new_target_not_constructor); __ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset)); __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ tst(scratch, Operand(1 << Map::kIsConstructor)); + __ tst(scratch, Operand(Map::IsConstructorBit::kMask)); __ b(ne, &new_target_constructor); __ bind(&new_target_not_constructor); { @@ -2178,7 +2187,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target has a [[Call]] internal method. 
__ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset)); - __ tst(r4, Operand(1 << Map::kIsCallable)); + __ tst(r4, Operand(Map::IsCallableBit::kMask)); __ b(eq, &non_callable); // Check if target is a proxy and call CallProxy external builtin @@ -2268,7 +2277,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // Check if target has a [[Construct]] internal method. __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset)); - __ tst(r2, Operand(1 << Map::kIsConstructor)); + __ tst(r2, Operand(Map::IsConstructorBit::kMask)); __ b(eq, &non_constructor); // Only dispatch to bound functions after checking whether they are @@ -2337,17 +2346,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kAbort); } -// static -void Builtins::Generate_AbortJS(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r1 : message as String object - // -- lr : return address - // ----------------------------------- - __ Push(r1); - __ Move(cp, Smi::kZero); - __ TailCallRuntime(Runtime::kAbortJS); -} - void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : actual number of arguments @@ -2434,8 +2432,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2)); // Adjust for frame. - __ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + - 2 * kPointerSize)); + __ sub(r4, r4, + Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp + + kPointerSize)); Label fill; __ bind(&fill); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 875f261835..dd92af89bb 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -111,9 +111,9 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) { // Initial map for the builtin InternalArray functions should be maps. __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset)); __ Tst(x10, kSmiTagMask); - __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction); + __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); __ CompareObjectType(x10, x11, x12, MAP_TYPE); - __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction); + __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction); } // Run the native code for the InternalArray function called as a normal @@ -138,9 +138,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) { // Initial map for the builtin Array functions should be maps. __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset)); __ Tst(x10, kSmiTagMask); - __ Assert(ne, kUnexpectedInitialMapForArrayFunction); + __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction); __ CompareObjectType(x10, x11, x12, MAP_TYPE); - __ Assert(eq, kUnexpectedInitialMapForArrayFunction); + __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction); } // Run the native code for the Array function called as a normal function. @@ -210,7 +210,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Check that FrameScope pushed the context on to the stack already. __ Peek(x2, 0); __ Cmp(x2, cp); - __ Check(eq, kUnexpectedValue); + __ Check(eq, AbortReason::kUnexpectedValue); } // Push number of arguments. 
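Both ports also stop hand-building map bit tests as `1 << Map::kIsCallable` and use the generated `Map::IsCallableBit::kMask` / `Map::IsConstructorBit::kMask` instead. A rough sketch of the BitField pattern behind those names; the bit positions here are invented for illustration, and V8's real template is more general:

```cpp
#include <cstdint>

// V8-style BitField sketch: a named view of kSize bits at kShift.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> kShift);
  }
};

// Hypothetical bit positions, for illustration only.
using IsCallableBit = BitField<bool, 4, 1>;
using IsConstructorBit = BitField<bool, 5, 1>;

// tst/TestAndBranch now take the named mask; if the bit ever moves,
// only the BitField definition changes, not every call site.
static_assert(IsCallableBit::kMask == (1u << 4), "same bits as the old shift");
```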
@@ -315,7 +315,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // Check that FrameScope pushed the context on to the stack already. __ Peek(x2, 0); __ Cmp(x2, cp); - __ Check(eq, kUnexpectedValue); + __ Check(eq, AbortReason::kUnexpectedValue); } // Preserve the incoming parameters on the stack. @@ -348,10 +348,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // ----------- S t a t e ------------- // -- x0: receiver - // -- Slot 3 / sp[0*kPointerSize]: new target - // -- Slot 2 / sp[1*kPointerSize]: constructor function - // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged) - // -- Slot 0 / sp[3*kPointerSize]: context + // -- Slot 4 / sp[0*kPointerSize]: new target + // -- Slot 3 / sp[1*kPointerSize]: padding + // -- Slot 2 / sp[2*kPointerSize]: constructor function + // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) + // -- Slot 0 / sp[4*kPointerSize]: context // ----------------------------------- // Deoptimizer enters here. masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( @@ -388,9 +389,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, // -- sp[0*kPointerSize]: implicit receiver (overwrite if argc odd) // -- sp[1*kPointerSize]: implicit receiver // -- sp[2*kPointerSize]: implicit receiver - // -- x1 and sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context + // -- sp[3*kPointerSize]: padding + // -- x1 and sp[4*kPointerSize]: constructor function + // -- sp[5*kPointerSize]: number of arguments (tagged) + // -- sp[6*kPointerSize]: context // ----------------------------------- // Round the number of arguments down to the next even number, and claim @@ -416,14 +418,8 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm, __ InvokeFunction(x1, x3, actual, CALL_FUNCTION); // ----------- S t a t e ------------- - // If argc is odd: - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: constructor function - // -- sp[2*kPointerSize]: number of arguments - // -- sp[3*kPointerSize]: context - // If argc is even: // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver + // -- sp[1*kPointerSize]: padding // -- sp[2*kPointerSize]: constructor function // -- sp[3*kPointerSize]: number of arguments // -- sp[4*kPointerSize]: context @@ -556,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack limit". Label stack_overflow; - __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex); + __ CompareRoot(__ StackPointer(), Heap::kRealStackLimitRootIndex); __ B(lo, &stack_overflow); // Get number of arguments for generator function. @@ -564,10 +560,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Ldr(w10, FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset)); - // Claim slots for arguments and receiver. - __ Add(x11, x10, 1); + // Claim slots for arguments and receiver (rounded up to a multiple of two). + __ Add(x11, x10, 2); + __ Bic(x11, x11, 1); __ Claim(x11); + // Store padding (which might be replaced by the receiver). + __ Sub(x11, x11, 1); + __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2)); + // Poke receiver into highest claimed slot. 
__ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset)); __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2)); @@ -578,8 +579,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // -- x10 : argument count // -- cp : generator context // -- lr : return address - // -- jssp[arg count] : generator receiver - // -- jssp[0 .. arg count - 1] : claimed for args + // -- sp[arg count] : generator receiver + // -- sp[0 .. arg count - 1] : claimed for args // ----------------------------------- // Push holes for arguments to generator function. Since the parser forced @@ -603,7 +604,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset)); __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset)); __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kMissingBytecodeArray); + __ Assert(eq, AbortReason::kMissingBytecodeArray); } // Resume (Ignition/TurboFan) generator object. @@ -624,10 +625,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Bind(&prepare_step_in_if_stepping); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(x1); + __ Push(x1, padreg); __ PushArgument(x4); __ CallRuntime(Runtime::kDebugOnFunctionCall); - __ Pop(x1); + __ Pop(padreg, x1); __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset)); } __ B(&stepping_prepared); @@ -635,9 +636,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Bind(&prepare_step_in_suspended_generator); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(x1); + __ Push(x1, padreg); __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); - __ Pop(x1); + __ Pop(padreg, x1); __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset)); } __ B(&stepping_prepared); @@ -652,8 +653,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args, Label* stack_overflow) { - DCHECK(masm->StackPointer().Is(jssp)); - UseScratchRegisterScope temps(masm); Register scratch = temps.AcquireX(); @@ -767,10 +766,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Mov(x23, x19); __ Mov(x24, x19); __ Mov(x25, x19); + __ Mov(x28, x19); // Don't initialize the reserved registers. // x26 : root register (root). // x27 : context pointer (cp). - // x28 : JS stack pointer (jssp). // x29 : frame pointer (fp). Handle<Code> builtin = is_construct @@ -820,7 +819,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) { // Drop receiver + arguments. 
if (__ emit_debug_code()) { __ Tst(args_size, kPointerSize - 1); - __ Check(eq, kUnexpectedValue); + __ Check(eq, AbortReason::kUnexpectedValue); } __ Lsr(args_size, args_size, kPointerSizeLog2); __ DropArguments(args_size); @@ -873,6 +872,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, &fallthrough); TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, + OptimizationMarker::kLogFirstExecution, + Runtime::kFunctionFirstExecution); + TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry, OptimizationMarker::kCompileOptimized, Runtime::kCompileOptimized_NotConcurrent); TailCallRuntimeIfMarkerEquals( @@ -887,7 +889,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm, __ Cmp( optimized_code_entry, Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue))); - __ Assert(eq, kExpectedOptimizationSentinel); + __ Assert(eq, AbortReason::kExpectedOptimizationSentinel); } __ B(&fallthrough); } @@ -967,7 +969,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array, __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset)); __ Add(bytecode_size_table, bytecode_size_table, Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount)); - __ B(&load_size); // Load the size of the current bytecode. __ Bind(&load_size); @@ -985,7 +986,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array, // - x3: the incoming new target or generator object // - cp: our context. // - fp: our caller's frame pointer. -// - jssp: stack pointer. // - lr: return address. // // The function builds an interpreter frame. See InterpreterFrameConstants in @@ -1009,7 +1009,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // the frame (that is done below). FrameScope frame_scope(masm, StackFrame::MANUAL); __ Push(lr, fp, cp, closure); - __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); + __ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp); // Get the bytecode array from the function object (or from the DebugInfo if // it is present) and load it into kInterpreterBytecodeArrayRegister. @@ -1030,11 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Check function data field is actually a BytecodeArray object. if (FLAG_debug_code) { - __ AssertNotSmi(kInterpreterBytecodeArrayRegister, - kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ AssertNotSmi( + kInterpreterBytecodeArrayRegister, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } // Reset code age. @@ -1058,8 +1060,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // Do a stack check to ensure we don't go over the limit. Label ok; - DCHECK(jssp.Is(__ StackPointer())); - __ Sub(x10, jssp, Operand(x11)); + __ Sub(x10, __ StackPointer(), Operand(x11)); __ CompareRoot(x10, Heap::kRealStackLimitRootIndex); __ B(hs, &ok); __ CallRuntime(Runtime::kThrowStackOverflow); @@ -1181,10 +1182,19 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, __ Unreachable(); __ Bind(&done); - // TODO(arm64): Claim one extra slot for padding and store padreg to the - // padding slot. + // Round up to an even number of slots and claim them. 
+ __ Add(slots_to_claim, slots_to_claim, 1); + __ Bic(slots_to_claim, slots_to_claim, 1); __ Claim(slots_to_claim); + { + // Store padding, which may be overwritten. + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); + __ Sub(scratch, slots_to_claim, 1); + __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); + } + if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { // Store "undefined" as the receiver arg if we need to. Register receiver = x14; @@ -1311,11 +1321,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { if (FLAG_debug_code) { // Check function data field is actually a BytecodeArray object. - __ AssertNotSmi(kInterpreterBytecodeArrayRegister, - kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ AssertNotSmi( + kInterpreterBytecodeArrayRegister, + AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1, BYTECODE_ARRAY_TYPE); - __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); + __ Assert( + eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } // Get the target bytecode offset from the frame. @@ -1375,7 +1387,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) { // The feedback vector must be defined. if (FLAG_debug_code) { __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex); - __ Assert(ne, BailoutReason::kExpectedFeedbackVector); + __ Assert(ne, AbortReason::kExpectedFeedbackVector); } // Is there an optimization marker or optimized code in the feedback vector? @@ -1634,7 +1646,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kPointerSize; // Set up frame pointer. - __ Add(fp, jssp, frame_size); + __ Add(fp, __ StackPointer(), frame_size); if (with_result) { // Overwrite the hole inserted by the deoptimizer with the return value from @@ -1770,9 +1782,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x0 : argc - // -- jssp[0] : argArray (if argc == 2) - // -- jssp[8] : thisArg (if argc >= 1) - // -- jssp[16] : receiver + // -- sp[0] : argArray (if argc == 2) + // -- sp[8] : thisArg (if argc >= 1) + // -- sp[16] : receiver // ----------------------------------- ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply"); @@ -1824,7 +1836,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x2 : argArray // -- x1 : receiver - // -- jssp[0] : thisArg + // -- sp[0] : thisArg // ----------------------------------- // 2. We don't need to check explicitly for callable receiver here, @@ -1855,55 +1867,65 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { Register argc = x0; Register function = x1; - Register scratch1 = x10; - Register scratch2 = x11; ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall"); - // 1. Make sure we have at least one argument. + // 1. Get the callable to call (passed as receiver) from the stack. + __ Peek(function, Operand(argc, LSL, kXRegSizeLog2)); + + // 2. Handle case with no arguments. 
{ - Label done; - __ Cbnz(argc, &done); - __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex); - __ Push(scratch1); - __ Mov(argc, 1); - __ Bind(&done); + Label non_zero; + Register scratch = x10; + __ Cbnz(argc, &non_zero); + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); + // Overwrite receiver with undefined, which will be the new receiver. + // We do not need to overwrite the padding slot above it with anything. + __ Poke(scratch, 0); + // Call function. The argument count is already zero. + __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); + __ Bind(&non_zero); } - // 2. Get the callable to call (passed as receiver) from the stack. - __ Peek(function, Operand(argc, LSL, kXRegSizeLog2)); + // 3. Overwrite the receiver with padding. If argc is odd, this is all we + // need to do. + Label arguments_ready; + __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2)); + __ Tbnz(argc, 0, &arguments_ready); - // 3. Shift arguments and return address one slot down on the stack - // (overwriting the original receiver). Adjust argument count to make - // the original first argument the new receiver. + // 4. If argc is even: + // Copy arguments two slots higher in memory, overwriting the original + // receiver and padding. { Label loop; - // Calculate the copy start address (destination). Copy end address is jssp. - __ SlotAddress(scratch2, argc); - __ Sub(scratch1, scratch2, kPointerSize); - - __ Bind(&loop); - __ Ldr(x12, MemOperand(scratch1, -kPointerSize, PostIndex)); - __ Str(x12, MemOperand(scratch2, -kPointerSize, PostIndex)); - __ Cmp(scratch1, jssp); - __ B(ge, &loop); - // Adjust the actual number of arguments and remove the top element - // (which is a copy of the last argument). - __ Sub(argc, argc, 1); - __ Drop(1); + Register copy_from = x10; + Register copy_to = x11; + Register count = x12; + Register last_arg_slot = x13; + __ Mov(count, argc); + __ Sub(last_arg_slot, argc, 1); + __ SlotAddress(copy_from, last_arg_slot); + __ Add(copy_to, copy_from, 2 * kPointerSize); + __ CopyDoubleWords(copy_to, copy_from, count, + TurboAssembler::kSrcLessThanDst); + // Drop two slots. These are copies of the last two arguments. + __ Drop(2); } - // 4. Call the callable. + // 5. Adjust argument count to make the original first argument the new + // receiver and call the callable. + __ Bind(&arguments_ready); + __ Sub(argc, argc, 1); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x0 : argc - // -- jssp[0] : argumentsList (if argc == 3) - // -- jssp[8] : thisArgument (if argc >= 2) - // -- jssp[16] : target (if argc >= 1) - // -- jssp[24] : receiver + // -- sp[0] : argumentsList (if argc == 3) + // -- sp[8] : thisArgument (if argc >= 2) + // -- sp[16] : target (if argc >= 1) + // -- sp[24] : receiver // ----------------------------------- ASM_LOCATION("Builtins::Generate_ReflectApply"); @@ -1962,7 +1984,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x2 : argumentsList // -- x1 : target - // -- jssp[0] : thisArgument + // -- sp[0] : thisArgument // ----------------------------------- // 2. 
We don't need to check explicitly for callable target here, @@ -1977,10 +1999,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x0 : argc - // -- jssp[0] : new.target (optional) - // -- jssp[8] : argumentsList - // -- jssp[16] : target - // -- jssp[24] : receiver + // -- sp[0] : new.target (optional) + // -- sp[8] : argumentsList + // -- sp[16] : target + // -- sp[24] : receiver // ----------------------------------- ASM_LOCATION("Builtins::Generate_ReflectConstruct"); @@ -2044,7 +2066,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // -- x2 : argumentsList // -- x1 : target // -- x3 : new.target - // -- jssp[0] : receiver (undefined) + // -- sp[0] : receiver (undefined) // ----------------------------------- // 2. We don't need to check explicitly for constructor target here, @@ -2060,25 +2082,26 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } -static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { +namespace { + +void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ Push(lr, fp); __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); __ Push(x11, x1); // x1: function - // We do not yet push the number of arguments, to maintain a 16-byte aligned - // stack pointer. This is done in step (3) in - // Generate_ArgumentsAdaptorTrampoline. - __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); + __ SmiTag(x11, x0); // x0: number of arguments. + __ Push(x11, padreg); + __ Add(fp, __ StackPointer(), + ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp); } -static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { +void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- x0 : result being passed through // ----------------------------------- // Get the number of arguments passed (as a smi), tear down the frame and // then drop the parameters and the receiver. - __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + - kPointerSize))); - __ Mov(jssp, fp); + __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ Mov(__ StackPointer(), fp); __ Pop(fp, lr); // Drop actual parameters and receiver. @@ -2086,6 +2109,67 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { __ DropArguments(x10, TurboAssembler::kCountExcludesReceiver); } +// Prepares the stack for copying the varargs. First we claim the necessary +// slots, taking care of potential padding. Then we copy the existing arguments +// one slot up or one slot down, as needed. +void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, + Register len) { + Label len_odd, exit; + Register slots_to_copy = x10; // If needed. + __ Add(slots_to_copy, argc, 1); + __ Add(argc, argc, len); + __ Tbnz(len, 0, &len_odd); + __ Claim(len); + __ B(&exit); + + __ Bind(&len_odd); + // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need + // one extra padding slot. If argc is odd, we know that the original arguments + // will have a padding slot we can reuse (since len is odd), so + // slots_to_claim = len - 1. 
+ { + Register scratch = x11; + Register slots_to_claim = x12; + __ Add(slots_to_claim, len, 1); + __ And(scratch, argc, 1); + __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1)); + __ Claim(slots_to_claim); + } + + Label copy_down; + __ Tbz(slots_to_copy, 0, ©_down); + + // Copy existing arguments one slot up. + { + Register src = x11; + Register dst = x12; + Register scratch = x13; + __ Sub(scratch, argc, 1); + __ SlotAddress(src, scratch); + __ SlotAddress(dst, argc); + __ CopyDoubleWords(dst, src, slots_to_copy, + TurboAssembler::kSrcLessThanDst); + } + __ B(&exit); + + // Copy existing arguments one slot down and add padding. + __ Bind(©_down); + { + Register src = x11; + Register dst = x12; + Register scratch = x13; + __ Add(src, len, 1); + __ Mov(dst, len); // CopySlots will corrupt dst. + __ CopySlots(dst, src, slots_to_copy); + __ Add(scratch, argc, 1); + __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding. + } + + __ Bind(&exit); +} + +} // namespace + // static void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { @@ -2118,30 +2202,34 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Bind(&done); } - // Push arguments onto the stack (thisArgument is already on the stack). - { - Label done, push, loop; - Register src = x5; + // Skip argument setup if we don't need to push any varargs. + Label done; + __ Cbz(len, &done); - __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag); - __ Add(argc, argc, len); // The 'len' argument for Call() or Construct(). - __ Cbz(len, &done); + Generate_PrepareForCopyingVarargs(masm, argc, len); + + // Push varargs. + { + Label loop; + Register src = x10; Register the_hole_value = x11; Register undefined_value = x12; - // We do not use the CompareRoot macro as it would do a LoadRoot behind the - // scenes and we want to avoid that in a loop. + Register scratch = x13; + __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag); __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex); __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex); - __ Claim(len); + // We do not use the CompareRoot macro as it would do a LoadRoot behind the + // scenes and we want to avoid that in a loop. + // TODO(all): Consider using Ldp and Stp. __ Bind(&loop); __ Sub(len, len, 1); - __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex)); - __ Cmp(x10, the_hole_value); - __ Csel(x10, x10, undefined_value, ne); - __ Poke(x10, Operand(len, LSL, kPointerSizeLog2)); + __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); + __ Cmp(scratch, the_hole_value); + __ Csel(scratch, scratch, undefined_value, ne); + __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2)); __ Cbnz(len, &loop); - __ Bind(&done); } + __ Bind(&done); // Tail-call to the actual Call or Construct builtin. __ Jump(code, RelocInfo::CODE_TARGET); @@ -2158,13 +2246,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // -- x2 : start index (to support rest parameters) // ----------------------------------- + Register argc = x0; + Register start_index = x2; + // Check if new.target has a [[Construct]] internal method. 
if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(x3, &new_target_not_constructor); __ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset)); __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset)); - __ TestAndBranchIfAnySet(x5, 1 << Map::kIsConstructor, + __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask, &new_target_constructor); __ Bind(&new_target_not_constructor); { @@ -2177,49 +2268,57 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, } // Check if we have an arguments adaptor frame below the function frame. - Label arguments_adaptor, arguments_done; - __ Ldr(x5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ Ldr(x4, MemOperand(x5, CommonFrameConstants::kContextOrFrameTypeOffset)); - __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); - __ B(eq, &arguments_adaptor); - { - __ Ldr(x6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ Ldr(x6, FieldMemOperand(x6, JSFunction::kSharedFunctionInfoOffset)); - __ Ldrsw(x6, FieldMemOperand( - x6, SharedFunctionInfo::kFormalParameterCountOffset)); - __ Mov(x5, fp); - } - __ B(&arguments_done); - __ Bind(&arguments_adaptor); + // args_fp will point to the frame that contains the actual arguments, which + // will be the current frame unless we have an arguments adaptor frame, in + // which case args_fp points to the arguments adaptor frame. + Register args_fp = x5; + Register len = x6; { - // Just load the length from ArgumentsAdaptorFrame. - __ Ldrsw(x6, UntagSmiMemOperand( - x5, ArgumentsAdaptorFrameConstants::kLengthOffset)); + Label arguments_adaptor, arguments_done; + Register scratch = x10; + __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ Ldr(x4, MemOperand(args_fp, + CommonFrameConstants::kContextOrFrameTypeOffset)); + __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)); + __ B(eq, &arguments_adaptor); + { + __ Ldr(scratch, + MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ Ldr(scratch, + FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset)); + __ Ldrsw(len, + FieldMemOperand( + scratch, SharedFunctionInfo::kFormalParameterCountOffset)); + __ Mov(args_fp, fp); + } + __ B(&arguments_done); + __ Bind(&arguments_adaptor); + { + // Just load the length from ArgumentsAdaptorFrame. + __ Ldrsw(len, + UntagSmiMemOperand( + args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); + } + __ Bind(&arguments_done); } - __ Bind(&arguments_done); Label stack_done, stack_overflow; - __ Subs(x6, x6, x2); + __ Subs(len, len, start_index); __ B(le, &stack_done); - { - // Check for stack overflow. - Generate_StackOverflowCheck(masm, x6, &stack_overflow); + // Check for stack overflow. + Generate_StackOverflowCheck(masm, x6, &stack_overflow); - // Forward the arguments from the caller frame. - { - Label loop; - __ Add(x5, x5, kPointerSize); - __ Add(x0, x0, x6); - __ Bind(&loop); - { - __ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2)); - __ Push(x4); - __ Subs(x6, x6, 1); - __ B(ne, &loop); - } - } + Generate_PrepareForCopyingVarargs(masm, argc, len); + + // Push varargs. 
+ { + Register dst = x13; + __ Add(args_fp, args_fp, 2 * kPointerSize); + __ SlotAddress(dst, 0); + __ CopyDoubleWords(dst, args_fp, len); } __ B(&stack_done); + __ Bind(&stack_overflow); __ TailCallRuntime(Runtime::kThrowStackOverflow); __ Bind(&stack_done); @@ -2338,12 +2437,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- x3 : new.target (only in case of [[Construct]]) // ----------------------------------- + Register bound_argc = x4; + Register bound_argv = x2; + // Load [[BoundArguments]] into x2 and length of that into x4. Label no_bound_arguments; - __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset)); - __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset)); - __ Cmp(x4, 0); - __ B(eq, &no_bound_arguments); + __ Ldr(bound_argv, + FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset)); + __ Ldrsw(bound_argc, + UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset)); + __ Cbz(bound_argc, &no_bound_arguments); { // ----------- S t a t e ------------- // -- x0 : the number of arguments (not including the receiver) @@ -2353,44 +2456,97 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // -- x4 : the number of [[BoundArguments]] // ----------------------------------- + Register argc = x0; + + // Check for stack overflow. { - Label done; - __ Claim(x4); // Check the stack for overflow. We are not trying to catch interruptions // (i.e. debug break and preemption) here, so check the "real stack // limit". - __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex); + Label done; + __ LoadRoot(x10, Heap::kRealStackLimitRootIndex); + // Make x10 the space we have left. The stack might already be overflowed + // here which will cause x10 to become negative. + __ Sub(x10, masm->StackPointer(), x10); + // Check if the arguments will overflow the stack. + __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2)); __ B(gt, &done); // Signed comparison. - // Restore the stack pointer. - __ Drop(x4); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterFrame(StackFrame::INTERNAL); - __ CallRuntime(Runtime::kThrowStackOverflow); - } + __ TailCallRuntime(Runtime::kThrowStackOverflow); __ Bind(&done); } - UseScratchRegisterScope temps(masm); - Register argc = temps.AcquireX(); - // Relocate arguments down the stack. - __ Mov(argc, x0); - __ CopySlots(0, x4, argc); + // Check if we need padding. + Label copy_args, copy_bound_args; + Register total_argc = x15; + Register slots_to_claim = x12; + __ Add(total_argc, argc, bound_argc); + __ Mov(slots_to_claim, bound_argc); + __ Tbz(bound_argc, 0, ©_args); + + // Load receiver before we start moving the arguments. We will only + // need this in this path because the bound arguments are odd. + Register receiver = x14; + __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2)); - // Copy [[BoundArguments]] to the stack (below the arguments). The first - // element of the array is copied to the highest address. + // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1, + // as we need one extra padding slot. If argc is odd, we know that the + // original arguments will have a padding slot we can reuse (since + // bound_argc is odd), so slots_to_claim = bound_argc - 1. 
{ - Label loop; - __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset)); - __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag); - __ SlotAddress(x11, x0); - __ Add(x0, x0, x4); - __ Bind(&loop); - __ Sub(x4, x4, 1); - __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2)); - // Poke into claimed area of stack. - __ Str(x10, MemOperand(x11, kPointerSize, PostIndex)); - __ Cbnz(x4, &loop); + Register scratch = x11; + __ Add(slots_to_claim, bound_argc, 1); + __ And(scratch, total_argc, 1); + __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1)); + } + + // Copy bound arguments. + __ Bind(©_args); + // Skip claim and copy of existing arguments in the special case where we + // do not need to claim any slots (this will be the case when + // bound_argc == 1 and the existing arguments have padding we can reuse). + __ Cbz(slots_to_claim, ©_bound_args); + __ Claim(slots_to_claim); + { + Register count = x10; + // Relocate arguments to a lower address. + __ Mov(count, argc); + __ CopySlots(0, slots_to_claim, count); + + __ Bind(©_bound_args); + // Copy [[BoundArguments]] to the stack (below the arguments). The first + // element of the array is copied to the highest address. + { + Label loop; + Register counter = x10; + Register scratch = x11; + Register copy_to = x12; + __ Add(bound_argv, bound_argv, + FixedArray::kHeaderSize - kHeapObjectTag); + __ SlotAddress(copy_to, argc); + __ Add(argc, argc, + bound_argc); // Update argc to include bound arguments. + __ Lsl(counter, bound_argc, kPointerSizeLog2); + __ Bind(&loop); + __ Sub(counter, counter, kPointerSize); + __ Ldr(scratch, MemOperand(bound_argv, counter)); + // Poke into claimed area of stack. + __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex)); + __ Cbnz(counter, &loop); + } + + { + Label done; + Register scratch = x10; + __ Tbz(bound_argc, 0, &done); + // Store receiver. + __ Add(scratch, __ StackPointer(), + Operand(total_argc, LSL, kPointerSizeLog2)); + __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex)); + __ Tbnz(total_argc, 0, &done); + // Store padding. + __ Str(padreg, MemOperand(scratch)); + __ Bind(&done); + } } } __ Bind(&no_bound_arguments); @@ -2438,7 +2594,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // Check if target has a [[Call]] internal method. __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset)); - __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable); + __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable); // Check if target is a proxy and call CallProxy external builtin __ Cmp(x5, JS_PROXY_TYPE); @@ -2533,7 +2689,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // Check if target has a [[Construct]] internal method. __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset)); - __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor); + __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask, + &non_constructor); // Only dispatch to bound functions after checking whether they are // constructors. 
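On this path `bound_argc` is known to be odd, and the claim size is computed branch-free as `bound_argc + 1 - 2 * (total_argc & 1)`, matching the `Add`/`And`/`Sub` sequence above. A worked sketch of that arithmetic in plain C++ (variable names mirror the registers; an illustration, not V8 code):

```cpp
#include <cassert>

// slots_to_claim for the bound_argc-odd path, as in the Add/And/Sub above.
int SlotsToClaim(int argc, int bound_argc) {
  int total_argc = argc + bound_argc;
  int slots_to_claim = bound_argc + 1;      // start from "extra padding slot"
  slots_to_claim -= (total_argc & 1) << 1;  // drop 2 if a slot can be reused
  return slots_to_claim;
}

int main() {
  // total_argc odd: an existing padding slot is reused, so two fewer
  // slots are claimed than the "bound_argc + 1" starting point.
  assert(SlotsToClaim(2, 3) == 2);
  // total_argc even: one extra slot is claimed and later filled with
  // padding so the stack pointer stays 16-byte aligned.
  assert(SlotsToClaim(3, 3) == 4);
  return 0;
}
```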
@@ -2605,19 +2762,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) { __ TailCallRuntime(Runtime::kAbort); } -// static -void Builtins::Generate_AbortJS(MacroAssembler* masm) { - ASM_LOCATION("Builtins::Generate_AbortJS"); - // ----------- S t a t e ------------- - // -- x1 : message as String object - // -- lr : return address - // ----------------------------------- - MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); - __ PushArgument(x1); - __ Move(cp, Smi::kZero); - __ TailCallRuntime(Runtime::kAbortJS); -} - void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline"); // ----------- S t a t e ------------- @@ -2651,14 +2795,16 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // 4 | num of | | // | actual args | | // |- - - - - - - - -| | - // [5] | [padding] | | + // 5 | padding | | // |-----------------+---- | - // 5+pad | receiver | ^ | + // [6] | [padding] | ^ | + // |- - - - - - - - -| | | + // 6+pad | receiver | | | // | (parameter 0) | | | // |- - - - - - - - -| | | - // 6+pad | parameter 1 | | | + // 7+pad | parameter 1 | | | // |- - - - - - - - -| Frame slots ----> expected args - // 7+pad | parameter 2 | | | + // 8+pad | parameter 2 | | | // |- - - - - - - - -| | | // | | | | // ... | ... | | | @@ -2671,7 +2817,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // | [undefined] | v <-- stack ptr v // -----+-----------------+--------------------------------- // - // There is an optional slot of padding to ensure stack alignment. + // There is an optional slot of padding above the receiver to ensure stack + // alignment of the arguments. // If the number of expected arguments is larger than the number of actual // arguments, the remaining expected slots will be filled with undefined. @@ -2695,10 +2842,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { Register argc_unused_actual = x14; Register scratch1 = x15, scratch2 = x16; - // We need slots for the expected arguments, with two extra slots for the - // number of actual arguments and the receiver. + // We need slots for the expected arguments, with one extra slot for the + // receiver. __ RecordComment("-- Stack check --"); - __ Add(scratch1, argc_expected, 2); + __ Add(scratch1, argc_expected, 1); Generate_StackOverflowCheck(masm, scratch1, &stack_overflow); // Round up number of slots to be even, to maintain stack alignment. @@ -2707,7 +2854,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ Bic(scratch1, scratch1, 1); __ Claim(scratch1, kPointerSize); - __ Mov(copy_to, jssp); + __ Mov(copy_to, __ StackPointer()); // Preparing the expected arguments is done in four steps, the order of // which is chosen so we can use LDP/STP and avoid conditional branches as @@ -2738,7 +2885,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ Bind(&enough_arguments); // (2) Copy all of the actual arguments, or as many as we need. + Label skip_copy; __ RecordComment("-- Copy actual arguments --"); + __ Cbz(argc_to_copy, &skip_copy); __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2)); __ Add(copy_from, fp, 2 * kPointerSize); // Adjust for difference between actual and expected arguments. 
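The `Add`-then-`Bic` pair in the adaptor's claim above (the same idiom the interpreter's argument-pushing code uses earlier in this patch) rounds a slot count up to the next even number; with 8-byte slots that is what keeps the arm64 stack pointer 16-byte aligned. As plain integer arithmetic, a sketch:

```cpp
#include <cstdint>

// (n + 1) & ~1: the Add(x, x, 1) / Bic(x, x, 1) idiom from the patch.
constexpr int64_t RoundUpToEvenSlots(int64_t n) {
  return (n + 1) & ~int64_t{1};
}

static_assert(RoundUpToEvenSlots(3) == 4, "odd counts gain a padding slot");
static_assert(RoundUpToEvenSlots(4) == 4, "even counts stay unchanged");
static_assert(RoundUpToEvenSlots(0) == 0, "zero stays zero");
```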
@@ -2755,21 +2904,22 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex)); __ Cmp(copy_end, copy_to); __ B(hi, ©_2_by_2); + __ Bind(&skip_copy); - // (3) Store number of actual arguments and padding. The padding might be - // unnecessary, in which case it will be overwritten by the receiver. - __ RecordComment("-- Store number of args and padding --"); - __ SmiTag(scratch1, argc_actual); - __ Stp(xzr, scratch1, MemOperand(fp, -4 * kPointerSize)); + // (3) Store padding, which might be overwritten by the receiver, if it is not + // necessary. + __ RecordComment("-- Store padding --"); + __ Str(padreg, MemOperand(fp, -5 * kPointerSize)); - // (4) Store receiver. Calculate target address from jssp to avoid checking + // (4) Store receiver. Calculate target address from the sp to avoid checking // for padding. Storing the receiver will overwrite either the extra slot // we copied with the actual arguments, if we did copy one, or the padding we // stored above. __ RecordComment("-- Store receiver --"); __ Add(copy_from, fp, 2 * kPointerSize); __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2)); - __ Str(scratch1, MemOperand(jssp, argc_expected, LSL, kPointerSizeLog2)); + __ Str(scratch1, + MemOperand(__ StackPointer(), argc_expected, LSL, kPointerSizeLog2)); // Arguments have been adapted. Now call the entry point. __ RecordComment("-- Call entry point --"); @@ -2805,10 +2955,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { } void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { - // Wasm code uses the csp. This builtin excepts to use the jssp. - // Thus, move csp to jssp when entering this builtin (called from wasm). - DCHECK(masm->StackPointer().is(jssp)); - __ Move(jssp, csp); { FrameScope scope(masm, StackFrame::INTERNAL); @@ -2833,9 +2979,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ PopDRegList(fp_regs); __ PopXRegList(gp_regs); } - // Move back to csp land. jssp now has the same value as when entering this - // function, but csp might have changed in the runtime call. - __ Move(csp, jssp); // Now jump to the instructions of the returned code object. 
__ Jump(x8); } diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 5fec0abfa5..027baa2873 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -31,6 +31,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)> PostLoopAction; + enum class MissingPropertyMode { kSkip, kUseUndefined }; + void FindResultGenerator() { a_.Bind(UndefinedConstant()); } Node* FindProcessor(Node* k_value, Node* k) { @@ -383,6 +385,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { const char* name, const BuiltinResultGenerator& generator, const CallResultProcessor& processor, const PostLoopAction& action, const Callable& slow_case_continuation, + MissingPropertyMode missing_property_mode, ForEachDirection direction = ForEachDirection::kForward) { Label non_array(this), array_changes(this, {&k_, &a_, &to_}); @@ -439,7 +442,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { generator(this); - HandleFastElements(processor, action, &fully_spec_compliant_, direction); + HandleFastElements(processor, action, &fully_spec_compliant_, direction, + missing_property_mode); BIND(&fully_spec_compliant_); @@ -550,6 +554,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { void GenerateIteratingArrayBuiltinLoopContinuation( const CallResultProcessor& processor, const PostLoopAction& action, + MissingPropertyMode missing_property_mode, ForEachDirection direction = ForEachDirection::kForward) { Label loop(this, {&k_, &a_, &to_}); Label after_loop(this); @@ -558,11 +563,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { { if (direction == ForEachDirection::kForward) { // 8. Repeat, while k < len - GotoIfNumericGreaterThanOrEqual(k(), len_, &after_loop); + GotoIfNumberGreaterThanOrEqual(k(), len_, &after_loop); } else { // OR // 10. Repeat, while k >= 0 - GotoIfNumericGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop); + GotoIfNumberGreaterThanOrEqual(SmiConstant(-1), k(), &after_loop); } Label done_element(this, &to_); @@ -572,12 +577,15 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { // index in the range [0, 2^32-1). CSA_ASSERT(this, IsNumberArrayIndex(k())); - // b. Let kPresent be HasProperty(O, Pk). - // c. ReturnIfAbrupt(kPresent). - Node* k_present = HasProperty(o(), k(), context(), kHasProperty); + if (missing_property_mode == MissingPropertyMode::kSkip) { + // b. Let kPresent be HasProperty(O, Pk). + // c. ReturnIfAbrupt(kPresent). + TNode<Oddball> k_present = + HasProperty(o(), k(), context(), kHasProperty); - // d. If kPresent is true, then - GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element); + // d. If kPresent is true, then + GotoIf(IsFalse(k_present), &done_element); + } // i. Let kValue be Get(O, Pk). // ii. ReturnIfAbrupt(kValue). 
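builtins-array-gen.cc introduces `MissingPropertyMode` to control whether the iteration loop performs the spec's HasProperty check. A plain-C++ analogy of the observable difference, with `std::optional` standing in for holes (an illustration of the semantics, not the CSA code):

```cpp
#include <cstddef>
#include <functional>
#include <optional>
#include <vector>

enum class MissingPropertyMode { kSkip, kUseUndefined };

using Element = std::optional<int>;  // nullopt models a hole (assumption)

void IterateElements(const std::vector<Element>& elements,
                     MissingPropertyMode mode,
                     const std::function<void(Element, std::size_t)>& processor) {
  for (std::size_t k = 0; k < elements.size(); ++k) {
    if (!elements[k].has_value() && mode == MissingPropertyMode::kSkip) {
      continue;  // b./d.: kPresent is false, skip this index.
    }
    // kUseUndefined: holes reach the processor as "undefined" (nullopt).
    processor(elements[k], k);
  }
}
```

forEach-style builtins keep `kSkip`, so holes are skipped; `find` and `findIndex`, wired up at the end of this diff, use `kUseUndefined`, so their callback sees `undefined` at missing indices, as the spec requires.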
@@ -655,7 +663,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { void VisitAllFastElementsOneKind(ElementsKind kind, const CallResultProcessor& processor, Label* array_changed, ParameterMode mode, - ForEachDirection direction) { + ForEachDirection direction, + MissingPropertyMode missing_property_mode) { Comment("begin VisitAllFastElementsOneKind"); VARIABLE(original_map, MachineRepresentation::kTagged); original_map.Bind(LoadMap(o())); @@ -670,7 +679,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { list, start, end, [=, &original_map](Node* index) { k_.Bind(ParameterToTagged(index, mode)); - Label one_element_done(this), hole_element(this); + Label one_element_done(this), hole_element(this), + process_element(this); // Check if o's map has changed during the callback. If so, we have to // fall back to the slower spec implementation for the rest of the @@ -693,24 +703,32 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { ? FixedArray::kHeaderSize : (FixedArray::kHeaderSize - kHeapObjectTag); Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size); - Node* value = nullptr; + VARIABLE(value, MachineRepresentation::kTagged); if (kind == PACKED_ELEMENTS) { - value = LoadObjectField(elements, offset); - GotoIf(WordEqual(value, TheHoleConstant()), &hole_element); + value.Bind(LoadObjectField(elements, offset)); + GotoIf(WordEqual(value.value(), TheHoleConstant()), &hole_element); } else { Node* double_value = LoadDoubleWithHoleCheck(elements, offset, &hole_element); - value = AllocateHeapNumberWithValue(double_value); + value.Bind(AllocateHeapNumberWithValue(double_value)); } - a_.Bind(processor(this, value, k())); - Goto(&one_element_done); + Goto(&process_element); BIND(&hole_element); - // Check if o's prototype change unexpectedly has elements after the - // callback in the case of a hole. - BranchIfPrototypesHaveNoElements(o_map, &one_element_done, - array_changed); - + if (missing_property_mode == MissingPropertyMode::kSkip) { + // Check if o's prototype change unexpectedly has elements after + // the callback in the case of a hole. 
+ BranchIfPrototypesHaveNoElements(o_map, &one_element_done, + array_changed); + } else { + value.Bind(UndefinedConstant()); + Goto(&process_element); + } + BIND(&process_element); + { + a_.Bind(processor(this, value.value(), k())); + Goto(&one_element_done); + } BIND(&one_element_done); }, 1, mode, advance_mode); @@ -719,7 +737,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { void HandleFastElements(const CallResultProcessor& processor, const PostLoopAction& action, Label* slow, - ForEachDirection direction) { + ForEachDirection direction, + MissingPropertyMode missing_property_mode) { Label switch_on_elements_kind(this), fast_elements(this), maybe_double_elements(this), fast_double_elements(this); @@ -742,7 +761,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { BIND(&fast_elements); { VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode, - direction); + direction, missing_property_mode); action(this); @@ -757,7 +776,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { BIND(&fast_double_elements); { VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode, - direction); + direction, missing_property_mode); action(this); @@ -879,7 +898,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler { ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS; }; -TF_BUILTIN(FastArrayPop, CodeStubAssembler) { +TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) { Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount); Node* context = Parameter(BuiltinDescriptor::kContext); CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget))); @@ -977,7 +996,7 @@ TF_BUILTIN(FastArrayPop, CodeStubAssembler) { } } -TF_BUILTIN(FastArrayPush, CodeStubAssembler) { +TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { TVARIABLE(IntPtrT, arg_index); Label default_label(this, &arg_index); Label smi_transition(this); @@ -1106,9 +1125,10 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) { } } -class FastArraySliceCodeStubAssembler : public CodeStubAssembler { +class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler { public: - explicit FastArraySliceCodeStubAssembler(compiler::CodeAssemblerState* state) + explicit ArrayPrototypeSliceCodeStubAssembler( + compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} Node* HandleFastSlice(Node* context, Node* array, Node* from, Node* count, @@ -1245,11 +1265,11 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler { void CopyOneElement(Node* context, Node* o, Node* a, Node* p_k, Variable& n) { // b. Let kPresent be HasProperty(O, Pk). // c. ReturnIfAbrupt(kPresent). - Node* k_present = HasProperty(o, p_k, context, kHasProperty); + TNode<Oddball> k_present = HasProperty(o, p_k, context, kHasProperty); // d. If kPresent is true, then Label done_element(this); - GotoIf(WordNotEqual(k_present, TrueConstant()), &done_element); + GotoIf(IsFalse(k_present), &done_element); // i. Let kValue be Get(O, Pk). // ii. ReturnIfAbrupt(kValue). 
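The `CopyOneElement` hunk above quotes the spec steps it implements: HasProperty, then Get and store only if the property is present. The same steps as a plain-C++ sketch, with a map standing in for the JS object (an analogy, not the CSA implementation):

```cpp
#include <cstdint>
#include <map>

using JSObject = std::map<int64_t, int>;  // index -> value (stand-in)

void CopyOneElement(const JSObject& o, JSObject& a, int64_t k, int64_t n) {
  // b./c. Let kPresent be HasProperty(O, Pk).
  auto it = o.find(k);
  // d. If kPresent is true, then:
  if (it != o.end()) {
    // i./ii. Let kValue be Get(O, Pk); store it as element n of A.
    a[n] = it->second;
  }
}
```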
@@ -1264,10 +1284,10 @@ class FastArraySliceCodeStubAssembler : public CodeStubAssembler { } }; -TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { +TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) { Node* const argc = ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)); - Node* const context = Parameter(BuiltinDescriptor::kContext); + TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext)); Label slow(this, Label::kDeferred), fast_elements_kind(this); CodeStubArguments args(this, argc); @@ -1339,15 +1359,15 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { // 5. Let relativeStart be ToInteger(start). // 6. ReturnIfAbrupt(relativeStart). - Node* arg0 = args.GetOptionalArgumentValue(0, SmiConstant(0)); - Node* relative_start = ToInteger(context, arg0); + TNode<Object> arg0 = CAST(args.GetOptionalArgumentValue(0, SmiConstant(0))); + Node* relative_start = ToInteger_Inline(context, arg0); // 7. If relativeStart < 0, let k be max((len + relativeStart),0); // else let k be min(relativeStart, len.value()). VARIABLE(k, MachineRepresentation::kTagged); Label relative_start_positive(this), relative_start_done(this); - GotoIfNumericGreaterThanOrEqual(relative_start, SmiConstant(0), - &relative_start_positive); + GotoIfNumberGreaterThanOrEqual(relative_start, SmiConstant(0), + &relative_start_positive); k.Bind(NumberMax(NumberAdd(len.value(), relative_start), NumberConstant(0))); Goto(&relative_start_done); BIND(&relative_start_positive); @@ -1358,11 +1378,12 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { // 8. If end is undefined, let relativeEnd be len; // else let relativeEnd be ToInteger(end). // 9. ReturnIfAbrupt(relativeEnd). - Node* end = args.GetOptionalArgumentValue(1, UndefinedConstant()); + TNode<Object> end = + CAST(args.GetOptionalArgumentValue(1, UndefinedConstant())); Label end_undefined(this), end_done(this); VARIABLE(relative_end, MachineRepresentation::kTagged); GotoIf(WordEqual(end, UndefinedConstant()), &end_undefined); - relative_end.Bind(ToInteger(context, end)); + relative_end.Bind(ToInteger_Inline(context, end)); Goto(&end_done); BIND(&end_undefined); relative_end.Bind(len.value()); @@ -1373,8 +1394,8 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { // else let final be min(relativeEnd, len). VARIABLE(final, MachineRepresentation::kTagged); Label relative_end_positive(this), relative_end_done(this); - GotoIfNumericGreaterThanOrEqual(relative_end.value(), NumberConstant(0), - &relative_end_positive); + GotoIfNumberGreaterThanOrEqual(relative_end.value(), NumberConstant(0), + &relative_end_positive); final.Bind(NumberMax(NumberAdd(len.value(), relative_end.value()), NumberConstant(0))); Goto(&relative_end_done); @@ -1412,7 +1433,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { BIND(&loop); { // 15. 
Repeat, while k < final - GotoIfNumericGreaterThanOrEqual(k.value(), final.value(), &after_loop); + GotoIfNumberGreaterThanOrEqual(k.value(), final.value(), &after_loop); Node* p_k = k.value(); // ToString(context, k.value()) is no-op @@ -1438,7 +1459,7 @@ TF_BUILTIN(FastArraySlice, FastArraySliceCodeStubAssembler) { args.PopAndReturn(a); } -TF_BUILTIN(FastArrayShift, CodeStubAssembler) { +TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) { Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount); Node* context = Parameter(BuiltinDescriptor::kContext); CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget))); @@ -1619,6 +1640,206 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinCodeStubAssembler) { Return(CloneFastJSArray(context, array, mode)); } +TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* array = Parameter(Descriptor::kArray); + Node* object = Parameter(Descriptor::kObject); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* to = Parameter(Descriptor::kTo); + + InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn, + this_arg, array, object, initial_k, + len, to); + + GenerateIteratingArrayBuiltinLoopContinuation( + &ArrayBuiltinCodeStubAssembler::FindProcessor, + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kUseUndefined, ForEachDirection::kForward); +} + +// Continuation that is called after an eager deoptimization from TF (ex. the +// array changes during iteration). +TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver, + callbackfn, this_arg, UndefinedConstant(), receiver, + initial_k, len, UndefinedConstant())); +} + +// Continuation that is called after a lazy deoptimization from TF (ex. the +// callback function is no longer callable). +TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver, + callbackfn, this_arg, UndefinedConstant(), receiver, + initial_k, len, UndefinedConstant())); +} + +// Continuation that is called after a lazy deoptimization from TF that happens +// right after the callback and its return value must be handled before +// iteration continues.
+TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* found_value = Parameter(Descriptor::kFoundValue); + Node* is_found = Parameter(Descriptor::kIsFound); + + // This custom lazy deopt point is right after the callback. find() needs + // to pick up at the next step, which is returning the element if the callback + // value is truthy. Otherwise, continue the search by calling the + // continuation. + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(is_found, &if_true, &if_false); + BIND(&if_true); + Return(found_value); + BIND(&if_false); + Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver, + callbackfn, this_arg, UndefinedConstant(), receiver, + initial_k, len, UndefinedConstant())); +} + +// ES #sec-array.prototype.find +TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) { + Node* argc = + ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)); + CodeStubArguments args(this, argc); + Node* context = Parameter(BuiltinDescriptor::kContext); + Node* new_target = Parameter(BuiltinDescriptor::kNewTarget); + Node* receiver = args.GetReceiver(); + Node* callbackfn = args.GetOptionalArgumentValue(0); + Node* this_arg = args.GetOptionalArgumentValue(1); + + InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, + new_target, argc); + + GenerateIteratingArrayBuiltinBody( + "Array.prototype.find", + &ArrayBuiltinCodeStubAssembler::FindResultGenerator, + &ArrayBuiltinCodeStubAssembler::FindProcessor, + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + Builtins::CallableFor(isolate(), Builtins::kArrayFindLoopContinuation), + MissingPropertyMode::kUseUndefined, ForEachDirection::kForward); +} + +TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* array = Parameter(Descriptor::kArray); + Node* object = Parameter(Descriptor::kObject); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* to = Parameter(Descriptor::kTo); + + InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn, + this_arg, array, object, initial_k, + len, to); + + GenerateIteratingArrayBuiltinLoopContinuation( + &ArrayBuiltinCodeStubAssembler::FindIndexProcessor, + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kUseUndefined, ForEachDirection::kForward); +} + +TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context, + receiver, callbackfn, this_arg, SmiConstant(-1), receiver, + initial_k, len, UndefinedConstant())); +} +
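The continuation builtins above all follow one protocol: optimized TurboFan code that deopts either re-enters the generic loop at an explicit resume index (the eager points), or first finishes interpreting the callback's result and only then re-enters (the "after callback" lazy points). A rough plain-C++ analogue, with invented names and only the array/index state (the real builtins also thread context, receiver and this_arg through):

#include <cstddef>
#include <functional>
#include <vector>

using Predicate = std::function<bool(double, std::size_t)>;

// Analogue of ArrayFindIndexLoopContinuation: start or resume the search
// at initial_k.
int FindIndexLoopContinuation(const std::vector<double>& elements,
                              std::size_t initial_k,
                              const Predicate& callback) {
  for (std::size_t k = initial_k; k < elements.size(); ++k) {
    if (callback(elements[k], k)) return static_cast<int>(k);
  }
  return -1;  // mirrors the SmiConstant(-1) threaded through above
}

// Analogue of the "after callback" lazy deopt point: the callback already
// ran for one element, so its boolean result is interpreted first, and the
// generic loop is only resumed on a miss. resume_k is assumed to already
// point past the element just tested, matching how the builtin passes
// initial_k through unchanged.
int FindIndexAfterCallback(const std::vector<double>& elements,
                           std::size_t resume_k, int found_index,
                           bool is_found, const Predicate& callback) {
  if (is_found) return found_index;
  return FindIndexLoopContinuation(elements, resume_k, callback);
}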
+TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context, + receiver, callbackfn, this_arg, SmiConstant(-1), receiver, + initial_k, len, UndefinedConstant())); +} + +TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* found_value = Parameter(Descriptor::kFoundValue); + Node* is_found = Parameter(Descriptor::kIsFound); + + // This custom lazy deopt point is right after the callback. findIndex() + // needs to pick up at the next step, which is returning the index if the + // callback value is truthy. Otherwise, continue the search by calling the + // continuation. + Label if_true(this), if_false(this); + BranchIfToBooleanIsTrue(is_found, &if_true, &if_false); + BIND(&if_true); + Return(found_value); + BIND(&if_false); + Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context, + receiver, callbackfn, this_arg, SmiConstant(-1), receiver, + initial_k, len, UndefinedConstant())); +} + +// ES #sec-array.prototype.findIndex +TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinCodeStubAssembler) { + Node* argc = + ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount)); + CodeStubArguments args(this, argc); + Node* context = Parameter(BuiltinDescriptor::kContext); + Node* new_target = Parameter(BuiltinDescriptor::kNewTarget); + Node* receiver = args.GetReceiver(); + Node* callbackfn = args.GetOptionalArgumentValue(0); + Node* this_arg = args.GetOptionalArgumentValue(1); + + InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg, + new_target, argc); + + GenerateIteratingArrayBuiltinBody( + "Array.prototype.findIndex", + &ArrayBuiltinCodeStubAssembler::FindIndexResultGenerator, + &ArrayBuiltinCodeStubAssembler::FindIndexProcessor, + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + Builtins::CallableFor(isolate(), + Builtins::kArrayFindIndexLoopContinuation), + MissingPropertyMode::kUseUndefined, ForEachDirection::kForward); +} + // ES #sec-get-%typedarray%.prototype.find TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinCodeStubAssembler) { Node* argc = @@ -1678,7 +1899,8 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::ForEachProcessor, - &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kSkip); } TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, @@ -1690,11 +1912,9 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); Node* callbackfn = Parameter(Descriptor::kCallbackFn); Node* this_arg = Parameter(Descriptor::kThisArg); Node* initial_k = Parameter(Descriptor::kInitialK); Node* len = Parameter(Descriptor::kLength); - Callable stub(Builtins::CallableFor(isolate(), - Builtins::kArrayForEachLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, - UndefinedConstant(),
receiver, initial_k, len, - UndefinedConstant())); + Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver, + callbackfn, this_arg, UndefinedConstant(), receiver, + initial_k, len, UndefinedConstant())); } TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, @@ -1706,11 +1926,9 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); Node* callbackfn = Parameter(Descriptor::kCallbackFn); Node* this_arg = Parameter(Descriptor::kThisArg); Node* initial_k = Parameter(Descriptor::kInitialK); Node* len = Parameter(Descriptor::kLength); - Callable stub(Builtins::CallableFor(isolate(), - Builtins::kArrayForEachLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, - UndefinedConstant(), receiver, initial_k, len, - UndefinedConstant())); + Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver, + callbackfn, this_arg, UndefinedConstant(), receiver, + initial_k, len, UndefinedConstant())); } TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) { @@ -1731,8 +1949,8 @@ TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::ForEachResultGenerator, &ArrayBuiltinCodeStubAssembler::ForEachProcessor, &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, - Builtins::CallableFor(isolate(), - Builtins::kArrayForEachLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArrayForEachLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) { @@ -1755,6 +1973,48 @@ TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); } +TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* result = Parameter(Descriptor::kResult); + + // This custom lazy deopt point is right after the callback. some() needs + // to pick up at the next step, which is either returning true if {result} + // is true or continuing the search at the next array element. + Label true_continue(this), false_continue(this); + + // iii. If selected is true, then... + BranchIfToBooleanIsTrue(result, &true_continue, &false_continue); + BIND(&true_continue); + { Return(TrueConstant()); } + BIND(&false_continue); + { + // Increment k.
+ initial_k = NumberInc(initial_k); + + Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver, + callbackfn, this_arg, FalseConstant(), receiver, + initial_k, len, UndefinedConstant())); + } +} + +TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver, + callbackfn, this_arg, FalseConstant(), receiver, initial_k, + len, UndefinedConstant())); +} + TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); @@ -1772,7 +2032,8 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::SomeProcessor, - &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kSkip); } TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) { @@ -1793,7 +2054,8 @@ TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::SomeResultGenerator, &ArrayBuiltinCodeStubAssembler::SomeProcessor, &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, - Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) { @@ -1816,6 +2078,49 @@ TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); } +TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* result = Parameter(Descriptor::kResult); + + // This custom lazy deopt point is right after the callback. every() needs + // to pick up at the next step, which is either continuing to the next + // array element or returning false if {result} is false. + Label true_continue(this), false_continue(this); + + // iii. If selected is true, then... + BranchIfToBooleanIsTrue(result, &true_continue, &false_continue); + BIND(&true_continue); + { + // Increment k. 
+ initial_k = NumberInc(initial_k); + + Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver, + callbackfn, this_arg, TrueConstant(), receiver, + initial_k, len, UndefinedConstant())); + } + BIND(&false_continue); + { Return(FalseConstant()); } +} + +TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* this_arg = Parameter(Descriptor::kThisArg); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver, + callbackfn, this_arg, TrueConstant(), receiver, initial_k, + len, UndefinedConstant())); +} + TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* receiver = Parameter(Descriptor::kReceiver); @@ -1833,7 +2138,8 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::EveryProcessor, - &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kSkip); } TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) { @@ -1854,7 +2160,8 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::EveryResultGenerator, &ArrayBuiltinCodeStubAssembler::EveryProcessor, &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, - Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) { @@ -1894,7 +2201,38 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::ReduceProcessor, - &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction); + &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction, + MissingPropertyMode::kSkip); +} + +TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* accumulator = Parameter(Descriptor::kAccumulator); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Callable stub( + Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation)); + Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(), + accumulator, receiver, initial_k, len, UndefinedConstant())); +} + +TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* result = Parameter(Descriptor::kResult); + + Callable stub( + Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation)); + Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(), + result, receiver, initial_k, len, UndefinedConstant())); } TF_BUILTIN(ArrayReduce, 
ArrayBuiltinCodeStubAssembler) { @@ -1915,7 +2253,8 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::ReduceResultGenerator, &ArrayBuiltinCodeStubAssembler::ReduceProcessor, &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction, - Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) { @@ -1956,7 +2295,37 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::ReduceProcessor, &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction, - ForEachDirection::kReverse); + MissingPropertyMode::kSkip, ForEachDirection::kReverse); +} + +TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* accumulator = Parameter(Descriptor::kAccumulator); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + + Callable stub(Builtins::CallableFor( + isolate(), Builtins::kArrayReduceRightLoopContinuation)); + Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(), + accumulator, receiver, initial_k, len, UndefinedConstant())); +} + +TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, + ArrayBuiltinCodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* receiver = Parameter(Descriptor::kReceiver); + Node* callbackfn = Parameter(Descriptor::kCallbackFn); + Node* initial_k = Parameter(Descriptor::kInitialK); + Node* len = Parameter(Descriptor::kLength); + Node* result = Parameter(Descriptor::kResult); + + Callable stub(Builtins::CallableFor( + isolate(), Builtins::kArrayReduceRightLoopContinuation)); + Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(), + result, receiver, initial_k, len, UndefinedConstant())); } TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) { @@ -1979,7 +2348,7 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::ReducePostLoopAction, Builtins::CallableFor(isolate(), Builtins::kArrayReduceRightLoopContinuation), - ForEachDirection::kReverse); + MissingPropertyMode::kSkip, ForEachDirection::kReverse); } TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) { @@ -2020,7 +2389,8 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::FilterProcessor, - &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kSkip); } TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, @@ -2034,10 +2404,9 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, Node* len = Parameter(Descriptor::kLength); Node* to = Parameter(Descriptor::kTo); - Callable stub( - Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, array, - receiver, initial_k, len, to)); + Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver, + callbackfn, this_arg, array, receiver, initial_k, len, + to)); } TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, @@ 
-2077,10 +2446,9 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, // Increment k. initial_k = NumberInc(initial_k); - Callable stub( - Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, array, - receiver, initial_k, len, to.value())); + Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver, + callbackfn, this_arg, array, receiver, initial_k, len, + to.value())); } TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) { @@ -2101,7 +2469,8 @@ TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) { &ArrayBuiltinCodeStubAssembler::FilterResultGenerator, &ArrayBuiltinCodeStubAssembler::FilterProcessor, &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, - Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) { @@ -2121,7 +2490,8 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) { GenerateIteratingArrayBuiltinLoopContinuation( &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor, - &ArrayBuiltinCodeStubAssembler::NullPostLoopAction); + &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, + MissingPropertyMode::kSkip); } TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) { @@ -2133,10 +2503,9 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinCodeStubAssembler) { Node* initial_k = Parameter(Descriptor::kInitialK); Node* len = Parameter(Descriptor::kLength); - Callable stub( - Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, array, - receiver, initial_k, len, UndefinedConstant())); + Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver, + callbackfn, this_arg, array, receiver, initial_k, len, + UndefinedConstant())); } TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) { @@ -2159,10 +2528,9 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinCodeStubAssembler) { // Then we have to increment k before going on. initial_k = NumberInc(initial_k); - Callable stub( - Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation)); - Return(CallStub(stub, context, receiver, callbackfn, this_arg, array, - receiver, initial_k, len, UndefinedConstant())); + Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver, + callbackfn, this_arg, array, receiver, initial_k, len, + UndefinedConstant())); } TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) { @@ -2182,7 +2550,8 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) { "Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator, &ArrayBuiltinCodeStubAssembler::FastMapProcessor, &ArrayBuiltinCodeStubAssembler::NullPostLoopAction, - Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation)); + Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation), + MissingPropertyMode::kSkip); } TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) { @@ -2848,7 +3217,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { { Label if_invalid(this, Label::kDeferred); // A fast array iterator transitioned to a slow iterator during - // iteration. Invalidate fast_array_iteration_prtoector cell to + // iteration. 
Invalidate fast_array_iteration_prtoector cell to + // iteration. Invalidate fast_array_iteration_protector cell to // prevent potential deopt loops. StoreObjectFieldNoWriteBarrier( iterator, JSArrayIterator::kIteratedObjectMapOffset, @@ -2877,7 +3246,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) { length = var_length.value(); } - GotoIfNumericGreaterThanOrEqual(index, length, &set_done); + GotoIfNumberGreaterThanOrEqual(index, length, &set_done); StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset, NumberInc(index)); diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc index 060696ee5d..0cdcb57a3f 100644 --- a/deps/v8/src/builtins/builtins-async-gen.cc +++ b/deps/v8/src/builtins/builtins-async-gen.cc @@ -161,6 +161,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(Node* context, CSA_ASSERT(this, WordEqual(LoadMapInstanceSizeInWords(function_map), IntPtrConstant(JSFunction::kSizeWithoutPrototype / kPointerSize))); + STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize); StoreMapNoWriteBarrier(function, function_map); StoreObjectFieldRoot(function, JSObject::kPropertiesOrHashOffset, Heap::kEmptyFixedArrayRootIndex); diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index aec265dc35..392040c995 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -232,10 +232,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( TNode<Object> add_func = GetAddFunction(variant, context, collection); IteratorBuiltinsAssembler iterator_assembler(this->state()); - TNode<Object> iterator = - CAST(iterator_assembler.GetIterator(context, iterable)); + IteratorRecord iterator = iterator_assembler.GetIterator(context, iterable); - CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator))); + CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object))); TNode<Object> fast_iterator_result_map = LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX); diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 2722f7b7a7..5c3883a870 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -134,6 +134,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info, BIND(&cell_done); } + STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize); StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset, literals_cell); StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset, @@ -457,10 +458,10 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( VARIABLE(var_properties, MachineRepresentation::kTagged); { Node* bit_field_3 = LoadMapBitField3(boilerplate_map); - GotoIf(IsSetWord32<Map::Deprecated>(bit_field_3), call_runtime); + GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bit_field_3), call_runtime); // Directly copy over the property store for dict-mode boilerplates. Label if_dictionary(this), if_fast(this), done(this); - Branch(IsSetWord32<Map::DictionaryMap>(bit_field_3), &if_dictionary, + Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field_3), &if_dictionary, &if_fast); BIND(&if_dictionary); { @@ -636,8 +637,8 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( CSA_ASSERT(this, IsMap(map)); // Ensure that slack tracking is disabled for the map.
STATIC_ASSERT(Map::kNoSlackTracking == 0); - CSA_ASSERT(this, - IsClearWord32<Map::ConstructionCounter>(LoadMapBitField3(map))); + CSA_ASSERT( + this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map))); Node* empty_fixed_array = EmptyFixedArrayConstant(); Node* result = AllocateJSObjectFromMap(map, empty_fixed_array, empty_fixed_array); diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index 823e6ca937..98e0f2c8b2 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -99,10 +99,9 @@ TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) { } TF_BUILTIN(StringToNumber, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); Node* input = Parameter(Descriptor::kArgument); - Return(StringToNumber(context, input)); + Return(StringToNumber(input)); } TF_BUILTIN(ToName, CodeStubAssembler) { @@ -145,10 +144,9 @@ TF_BUILTIN(ToNumber, CodeStubAssembler) { // ES section #sec-tostring-applied-to-the-number-type TF_BUILTIN(NumberToString, CodeStubAssembler) { - Node* context = Parameter(Descriptor::kContext); Node* input = Parameter(Descriptor::kArgument); - Return(NumberToString(context, input)); + Return(NumberToString(input)); } // ES section #sec-tostring @@ -330,7 +328,14 @@ TF_BUILTIN(ToInteger, CodeStubAssembler) { Node* context = Parameter(Descriptor::kContext); Node* input = Parameter(Descriptor::kArgument); - Return(ToInteger(context, input)); + Return(ToInteger(context, input, kNoTruncation)); +} + +TF_BUILTIN(ToInteger_TruncateMinusZero, CodeStubAssembler) { + Node* context = Parameter(Descriptor::kContext); + Node* input = Parameter(Descriptor::kArgument); + + Return(ToInteger(context, input, kTruncateMinusZero)); } // ES6 section 7.1.13 ToObject (argument) diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 2b2cc407b5..0ffd15df7c 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -91,8 +91,9 @@ namespace internal { ASM(StackCheck) \ \ /* String helpers */ \ - TFC(StringCharAt, StringCharAt, 1) \ - TFC(StringCharCodeAt, StringCharCodeAt, 1) \ + TFC(StringCharAt, StringAt, 1) \ + TFC(StringCharCodeAt, StringAt, 1) \ + TFC(StringCodePointAt, StringAt, 1) \ TFC(StringEqual, Compare, 1) \ TFC(StringGreaterThan, Compare, 1) \ TFC(StringGreaterThanOrEqual, Compare, 1) \ @@ -190,6 +191,7 @@ namespace internal { TFC(NumberToString, TypeConversion, 1) \ TFC(ToString, TypeConversion, 1) \ TFC(ToInteger, TypeConversion, 1) \ + TFC(ToInteger_TruncateMinusZero, TypeConversion, 1) \ TFC(ToLength, TypeConversion, 1) \ TFC(ClassOf, Typeof, 1) \ TFC(Typeof, Typeof, 1) \ @@ -199,26 +201,19 @@ namespace internal { TFC(ToBooleanLazyDeoptContinuation, TypeConversionStackParameter, 1) \ \ /* Handlers */ \ - TFH(LoadICProtoArray, LoadICProtoArray) \ - TFH(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray) \ TFH(KeyedLoadIC_Megamorphic, LoadWithVector) \ - TFH(KeyedLoadIC_Miss, LoadWithVector) \ TFH(KeyedLoadIC_PolymorphicName, LoadWithVector) \ TFH(KeyedLoadIC_Slow, LoadWithVector) \ TFH(KeyedStoreIC_Megamorphic, StoreWithVector) \ - TFH(KeyedStoreIC_Miss, StoreWithVector) \ TFH(KeyedStoreIC_Slow, StoreWithVector) \ - TFH(LoadGlobalIC_Miss, LoadGlobalWithVector) \ - TFH(LoadGlobalIC_Slow, LoadGlobalWithVector) \ + TFH(LoadGlobalIC_Slow, LoadWithVector) \ TFH(LoadField, LoadField) \ TFH(LoadIC_FunctionPrototype, 
LoadWithVector) \ - TFH(LoadIC_Miss, LoadWithVector) \ TFH(LoadIC_Slow, LoadWithVector) \ TFH(LoadIC_StringLength, LoadWithVector) \ TFH(LoadIC_StringWrapperLength, LoadWithVector) \ TFH(LoadIC_Uninitialized, LoadWithVector) \ TFH(StoreGlobalIC_Slow, StoreWithVector) \ - TFH(StoreIC_Miss, StoreWithVector) \ TFH(StoreIC_Uninitialized, StoreWithVector) \ \ /* Promise helpers */ \ @@ -226,6 +221,9 @@ namespace internal { TFS(RejectNativePromise, kPromise, kValue, kDebugEvent) \ TFS(PerformNativePromiseThen, kPromise, kResolveReaction, kRejectReaction, \ kResultPromise) \ + TFS(EnqueueMicrotask, kMicrotask) \ + TFC(RunMicrotasks, RunMicrotasks, 1) \ + TFS(PromiseResolveThenableJob, kMicrotask) \ \ /* Object property helpers */ \ TFS(HasProperty, kKey, kObject) \ @@ -233,7 +231,7 @@ namespace internal { \ /* Abort */ \ ASM(Abort) \ - ASM(AbortJS) \ + TFC(AbortJS, AbortJS, 1) \ \ /* Built-in functions for Javascript */ \ /* Special internal builtins */ \ @@ -255,16 +253,16 @@ namespace internal { TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.pop */ \ CPP(ArrayPop) \ - TFJ(FastArrayPop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + TFJ(ArrayPrototypePop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.push */ \ CPP(ArrayPush) \ - TFJ(FastArrayPush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + TFJ(ArrayPrototypePush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.shift */ \ CPP(ArrayShift) \ - TFJ(FastArrayShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.slice */ \ CPP(ArraySlice) \ - TFJ(FastArraySlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.splice */ \ CPP(ArraySplice) \ /* ES6 #sec-array.prototype.unshift */ \ @@ -283,10 +281,18 @@ namespace internal { /* ES6 #sec-array.prototype.every */ \ TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \ kObject, kInitialK, kLength, kTo) \ + TFJ(ArrayEveryLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \ + kInitialK, kLength) \ + TFJ(ArrayEveryLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \ + kInitialK, kLength, kResult) \ TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.some */ \ TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \ kObject, kInitialK, kLength, kTo) \ + TFJ(ArraySomeLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \ + kInitialK, kLength) \ + TFJ(ArraySomeLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \ + kLength, kResult) \ TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.filter */ \ TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \ @@ -307,13 +313,42 @@ namespace internal { /* ES6 #sec-array.prototype.reduce */ \ TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \ kAccumulator, kObject, kInitialK, kLength, kTo) \ + TFJ(ArrayReduceLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \ + kLength, kAccumulator) \ + TFJ(ArrayReduceLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \ + kLength, kResult) \ TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.reduceRight */ \ TFS(ArrayReduceRightLoopContinuation, kReceiver, 
kCallbackFn, kThisArg, \ kAccumulator, kObject, kInitialK, kLength, kTo) \ + TFJ(ArrayReduceRightLoopEagerDeoptContinuation, 4, kCallbackFn, kInitialK, \ + kLength, kAccumulator) \ + TFJ(ArrayReduceRightLoopLazyDeoptContinuation, 4, kCallbackFn, kInitialK, \ + kLength, kResult) \ TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.entries */ \ TFJ(ArrayPrototypeEntries, 0) \ + /* ES6 #sec-array.prototype.find */ \ + TFS(ArrayFindLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \ + kObject, kInitialK, kLength, kTo) \ + TFJ(ArrayFindLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \ + kInitialK, kLength) \ + TFJ(ArrayFindLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, kInitialK, \ + kLength, kResult) \ + TFJ(ArrayFindLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \ + kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \ + TFJ(ArrayPrototypeFind, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + /* ES6 #sec-array.prototype.findIndex */ \ + TFS(ArrayFindIndexLoopContinuation, kReceiver, kCallbackFn, kThisArg, \ + kArray, kObject, kInitialK, kLength, kTo) \ + TFJ(ArrayFindIndexLoopEagerDeoptContinuation, 4, kCallbackFn, kThisArg, \ + kInitialK, kLength) \ + TFJ(ArrayFindIndexLoopLazyDeoptContinuation, 5, kCallbackFn, kThisArg, \ + kInitialK, kLength, kResult) \ + TFJ(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation, 6, kCallbackFn, \ + kThisArg, kInitialK, kLength, kFoundValue, kIsFound) \ + TFJ(ArrayPrototypeFindIndex, \ + SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.keys */ \ TFJ(ArrayPrototypeKeys, 0) \ /* ES6 #sec-array.prototype.values */ \ @@ -555,6 +590,8 @@ namespace internal { TFH(LoadICTrampoline, Load) \ TFH(KeyedLoadIC, LoadWithVector) \ TFH(KeyedLoadICTrampoline, Load) \ + TFH(StoreGlobalIC, StoreGlobalWithVector) \ + TFH(StoreGlobalICTrampoline, StoreGlobal) \ TFH(StoreIC, StoreWithVector) \ TFH(StoreICTrampoline, Store) \ TFH(KeyedStoreIC, StoreWithVector) \ @@ -718,7 +755,7 @@ namespace internal { CPP(ObjectDefineProperties) \ CPP(ObjectDefineProperty) \ CPP(ObjectDefineSetter) \ - CPP(ObjectEntries) \ + TFJ(ObjectEntries, 1, kObject) \ CPP(ObjectFreeze) \ TFJ(ObjectGetOwnPropertyDescriptor, \ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ @@ -745,8 +782,10 @@ namespace internal { CPP(ObjectPrototypePropertyIsEnumerable) \ CPP(ObjectPrototypeGetProto) \ CPP(ObjectPrototypeSetProto) \ + /* ES #sec-object.prototype.tolocalestring */ \ + TFJ(ObjectPrototypeToLocaleString, 0) \ CPP(ObjectSeal) \ - CPP(ObjectValues) \ + TFJ(ObjectValues, 1, kObject) \ \ /* instanceof */ \ TFC(OrdinaryHasInstance, Compare, 1) \ @@ -771,13 +810,15 @@ namespace internal { TFJ(PromiseRejectClosure, 1, kValue) \ TFJ(PromiseAllResolveElementClosure, 1, kValue) \ /* ES #sec-promise.prototype.then */ \ - TFJ(PromiseThen, 2, kOnFullfilled, kOnRejected) \ + TFJ(PromisePrototypeThen, 2, kOnFullfilled, kOnRejected) \ /* ES #sec-promise.prototype.catch */ \ - TFJ(PromiseCatch, 1, kOnRejected) \ + TFJ(PromisePrototypeCatch, 1, kOnRejected) \ /* ES #sec-fulfillpromise */ \ TFJ(ResolvePromise, 2, kPromise, kValue) \ TFS(PromiseHandleReject, kPromise, kOnReject, kException) \ - TFJ(PromiseHandle, 5, kValue, kHandler, kDeferredPromise, \ + TFS(PromiseHandle, kValue, kHandler, kDeferredPromise, kDeferredOnResolve, \ + kDeferredOnReject) \ + TFJ(PromiseHandleJS, 5, kValue, kHandler, kDeferredPromise, \ kDeferredOnResolve, kDeferredOnReject) \ /* ES #sec-promise.resolve */ \ 
TFJ(PromiseResolveWrapper, 1, kValue) \ @@ -785,7 +826,7 @@ namespace internal { /* ES #sec-promise.reject */ \ TFJ(PromiseReject, 1, kReason) \ TFJ(InternalPromiseReject, 3, kPromise, kReason, kDebugEvent) \ - TFJ(PromiseFinally, 1, kOnFinally) \ + TFJ(PromisePrototypeFinally, 1, kOnFinally) \ TFJ(PromiseThenFinally, 1, kValue) \ TFJ(PromiseCatchFinally, 1, kReason) \ TFJ(PromiseValueThunkFinally, 0) \ @@ -799,6 +840,8 @@ namespace internal { TFJ(ProxyConstructor, 0) \ TFJ(ProxyConstructor_ConstructStub, \ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \ + TFJ(ProxyRevocable, 2, kTarget, kHandler) \ + TFJ(ProxyRevoke, 0) \ TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \ TFS(ProxyHasProperty, kProxy, kName) \ TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue, kLanguageMode) \ diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc index 0b98a7169b..7c1db5093d 100644 --- a/deps/v8/src/builtins/builtins-function-gen.cc +++ b/deps/v8/src/builtins/builtins-function-gen.cc @@ -6,7 +6,6 @@ #include "src/builtins/builtins.h" #include "src/code-stub-assembler.h" #include "src/frame-constants.h" -#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker. namespace v8 { namespace internal { diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 4d85be9f91..48c28ab730 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -23,17 +23,6 @@ TF_BUILTIN(LoadIC_StringWrapperLength, CodeStubAssembler) { Return(LoadStringLengthAsSmi(string)); } -TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot, - vector); -} - TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -52,18 +41,6 @@ void Builtins::Generate_StoreIC_Uninitialized( StoreICUninitializedGenerator::Generate(state); } -TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector, - receiver, name); -} - TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -78,15 +55,6 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) { receiver, name); } -TF_BUILTIN(LoadGlobalIC_Miss, CodeStubAssembler) { - Node* name = Parameter(Descriptor::kName); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot, vector); -} - TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) { Node* name = Parameter(Descriptor::kName); Node* slot = Parameter(Descriptor::kSlot); @@ -110,16 +78,6 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) { 
TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector); } -TF_BUILTIN(LoadIC_Miss, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector); -} - TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); @@ -128,18 +86,6 @@ TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) { TailCallRuntime(Runtime::kGetProperty, context, receiver, name); } -TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) { - Node* receiver = Parameter(Descriptor::kReceiver); - Node* name = Parameter(Descriptor::kName); - Node* value = Parameter(Descriptor::kValue); - Node* slot = Parameter(Descriptor::kSlot); - Node* vector = Parameter(Descriptor::kVector); - Node* context = Parameter(Descriptor::kContext); - - TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector, - receiver, name); -} - TF_BUILTIN(StoreGlobalIC_Slow, CodeStubAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); Node* name = Parameter(Descriptor::kName); diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index 536a7f31ed..94613a6a32 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -29,6 +29,8 @@ IC_BUILTIN(LoadField) IC_BUILTIN(KeyedLoadICTrampoline) IC_BUILTIN(KeyedLoadIC_Megamorphic) IC_BUILTIN(KeyedLoadIC_PolymorphicName) +IC_BUILTIN(StoreGlobalIC) +IC_BUILTIN(StoreGlobalICTrampoline) IC_BUILTIN(StoreIC) IC_BUILTIN(StoreICTrampoline) IC_BUILTIN(KeyedStoreIC) @@ -40,8 +42,6 @@ IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline, NOT_INSIDE_TYPEOF) IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline, INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadICProtoArray, LoadICProtoArray, false) -IC_BUILTIN_PARAM(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray, true) } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index bc9723700c..bb4b66e3a4 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/api.h" #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/code-stub-assembler.h" @@ -13,6 +14,9 @@ namespace v8 { namespace internal { +template <typename T> +using TNode = compiler::TNode<T>; + // ----------------------------------------------------------------------------- // Interrupt and stack checks. 
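Later in this file the patch adds CSA implementations of EnqueueMicrotask and RunMicrotasks (see the hunks below). Two aspects are easy to restate in ordinary C++: the queue's FixedArray backing store grows to max(8, 2 * count) when full, and RunMicrotasks detaches the whole queue before draining it, so tasks enqueued by a running microtask land in a fresh queue that the outer loop picks up on its next pass. A sketch under those assumptions (container and task types invented for illustration):

#include <algorithm>
#include <cstddef>
#include <vector>

class MicrotaskQueue {
 public:
  void Enqueue(void* microtask) {
    if (count_ == storage_.size()) {
      // Mirrors IntPtrMax(8, num_tasks + num_tasks) in the CSA code below.
      storage_.resize(std::max<std::size_t>(8, 2 * count_));
    }
    storage_[count_++] = microtask;
  }

  // Drain the queue. run_task is whatever executes one microtask; tasks it
  // enqueues while running are collected in a fresh backing store and
  // handled by the outer loop, like the init_queue_loop structure in
  // RunMicrotasks.
  template <typename F>
  void Run(F&& run_task) {
    while (count_ != 0) {
      std::vector<void*> queue;
      queue.swap(storage_);
      std::size_t n = count_;
      count_ = 0;
      for (std::size_t i = 0; i < n; ++i) run_task(queue[i]);
    }
  }

 private:
  std::vector<void*> storage_;
  std::size_t count_ = 0;
};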
@@ -583,7 +587,7 @@ TF_BUILTIN(ForInFilter, CodeStubAssembler) { CSA_ASSERT(this, IsString(key)); Label if_true(this), if_false(this); - Node* result = HasProperty(object, key, context, kForInHasProperty); + TNode<Oddball> result = HasProperty(object, key, context, kForInHasProperty); Branch(IsTrue(result), &if_true, &if_false); BIND(&if_true); @@ -607,5 +611,448 @@ TF_BUILTIN(SameValue, CodeStubAssembler) { Return(FalseConstant()); } +class InternalBuiltinsAssembler : public CodeStubAssembler { + public: + explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + TNode<IntPtrT> GetPendingMicrotaskCount(); + void SetPendingMicrotaskCount(TNode<IntPtrT> count); + + TNode<FixedArray> GetMicrotaskQueue(); + void SetMicrotaskQueue(TNode<FixedArray> queue); + + TNode<Context> GetCurrentContext(); + void SetCurrentContext(TNode<Context> context); + + void EnterMicrotaskContext(TNode<Context> context); + void LeaveMicrotaskContext(); + + TNode<Object> GetPendingException() { + auto ref = ExternalReference(kPendingExceptionAddress, isolate()); + return TNode<Object>::UncheckedCast( + Load(MachineType::AnyTagged(), ExternalConstant(ref))); + } + void ClearPendingException() { + auto ref = ExternalReference(kPendingExceptionAddress, isolate()); + StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref), + TheHoleConstant()); + } + + TNode<Object> GetScheduledException() { + auto ref = ExternalReference::scheduled_exception_address(isolate()); + return TNode<Object>::UncheckedCast( + Load(MachineType::AnyTagged(), ExternalConstant(ref))); + } + void ClearScheduledException() { + auto ref = ExternalReference::scheduled_exception_address(isolate()); + StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref), + TheHoleConstant()); + } +}; + +TNode<IntPtrT> InternalBuiltinsAssembler::GetPendingMicrotaskCount() { + auto ref = ExternalReference::pending_microtask_count_address(isolate()); + if (kIntSize == 8) { + return TNode<IntPtrT>::UncheckedCast( + Load(MachineType::Int64(), ExternalConstant(ref))); + } else { + Node* const value = Load(MachineType::Int32(), ExternalConstant(ref)); + return ChangeInt32ToIntPtr(value); + } +} + +void InternalBuiltinsAssembler::SetPendingMicrotaskCount(TNode<IntPtrT> count) { + auto ref = ExternalReference::pending_microtask_count_address(isolate()); + auto rep = kIntSize == 8 ? 
MachineRepresentation::kWord64 + : MachineRepresentation::kWord32; + if (kIntSize == 4 && kPointerSize == 8) { + Node* const truncated_count = + TruncateInt64ToInt32(TNode<Int64T>::UncheckedCast(count)); + StoreNoWriteBarrier(rep, ExternalConstant(ref), truncated_count); + } else { + StoreNoWriteBarrier(rep, ExternalConstant(ref), count); + } +} + +TNode<FixedArray> InternalBuiltinsAssembler::GetMicrotaskQueue() { + return TNode<FixedArray>::UncheckedCast( + LoadRoot(Heap::kMicrotaskQueueRootIndex)); +} + +void InternalBuiltinsAssembler::SetMicrotaskQueue(TNode<FixedArray> queue) { + StoreRoot(Heap::kMicrotaskQueueRootIndex, queue); +} + +TNode<Context> InternalBuiltinsAssembler::GetCurrentContext() { + auto ref = ExternalReference(kContextAddress, isolate()); + return TNode<Context>::UncheckedCast( + Load(MachineType::AnyTagged(), ExternalConstant(ref))); +} + +void InternalBuiltinsAssembler::SetCurrentContext(TNode<Context> context) { + auto ref = ExternalReference(kContextAddress, isolate()); + StoreNoWriteBarrier(MachineRepresentation::kTagged, ExternalConstant(ref), + context); +} + +void InternalBuiltinsAssembler::EnterMicrotaskContext( + TNode<Context> microtask_context) { + auto ref = ExternalReference::handle_scope_implementer_address(isolate()); + Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + StoreNoWriteBarrier( + MachineType::PointerRepresentation(), hsi, + IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext), + BitcastTaggedToWord(microtask_context)); + + // Load mirrored std::vector length from + // HandleScopeImplementer::entered_contexts_count_ + auto type = kSizetSize == 8 ? MachineType::Uint64() : MachineType::Uint32(); + Node* entered_contexts_length = Load( + type, hsi, + IntPtrConstant(HandleScopeImplementerOffsets::kEnteredContextsCount)); + + auto rep = kSizetSize == 8 ? 
MachineRepresentation::kWord64 + : MachineRepresentation::kWord32; + + StoreNoWriteBarrier( + rep, hsi, + IntPtrConstant( + HandleScopeImplementerOffsets::kEnteredContextCountDuringMicrotasks), + entered_contexts_length); +} + +void InternalBuiltinsAssembler::LeaveMicrotaskContext() { + auto ref = ExternalReference::handle_scope_implementer_address(isolate()); + + Node* const hsi = Load(MachineType::Pointer(), ExternalConstant(ref)); + StoreNoWriteBarrier( + MachineType::PointerRepresentation(), hsi, + IntPtrConstant(HandleScopeImplementerOffsets::kMicrotaskContext), + IntPtrConstant(0)); + if (kSizetSize == 4) { + StoreNoWriteBarrier( + MachineRepresentation::kWord32, hsi, + IntPtrConstant(HandleScopeImplementerOffsets:: + kEnteredContextCountDuringMicrotasks), + Int32Constant(0)); + } else { + StoreNoWriteBarrier( + MachineRepresentation::kWord64, hsi, + IntPtrConstant(HandleScopeImplementerOffsets:: + kEnteredContextCountDuringMicrotasks), + Int64Constant(0)); + } +} + +TF_BUILTIN(EnqueueMicrotask, InternalBuiltinsAssembler) { + Node* microtask = Parameter(Descriptor::kMicrotask); + + TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(); + TNode<IntPtrT> new_num_tasks = IntPtrAdd(num_tasks, IntPtrConstant(1)); + TNode<FixedArray> queue = GetMicrotaskQueue(); + TNode<IntPtrT> queue_length = LoadAndUntagFixedArrayBaseLength(queue); + + Label if_append(this), if_grow(this), done(this); + Branch(WordEqual(num_tasks, queue_length), &if_grow, &if_append); + + BIND(&if_grow); + { + // Determine the new queue length and check if we need to allocate + // in large object space (instead of just going to new space, where + // we also know that we don't need any write barriers for setting + // up the new queue object). + Label if_newspace(this), if_lospace(this, Label::kDeferred); + TNode<IntPtrT> new_queue_length = + IntPtrMax(IntPtrConstant(8), IntPtrAdd(num_tasks, num_tasks)); + Branch(IntPtrLessThanOrEqual(new_queue_length, + IntPtrConstant(FixedArray::kMaxRegularLength)), + &if_newspace, &if_lospace); + + BIND(&if_newspace); + { + // This is the likely case where the new queue fits into new space, + // and thus we don't need any write barriers for initializing it. + TNode<FixedArray> new_queue = + CAST(AllocateFixedArray(PACKED_ELEMENTS, new_queue_length)); + CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks, + SKIP_WRITE_BARRIER); + StoreFixedArrayElement(new_queue, num_tasks, microtask, + SKIP_WRITE_BARRIER); + FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks, + new_queue_length, Heap::kUndefinedValueRootIndex); + SetMicrotaskQueue(new_queue); + Goto(&done); + } + + BIND(&if_lospace); + { + // The fallback case where the new queue ends up in large object space. 
+ TNode<FixedArray> new_queue = CAST(AllocateFixedArray( + PACKED_ELEMENTS, new_queue_length, INTPTR_PARAMETERS, + AllocationFlag::kAllowLargeObjectAllocation)); + CopyFixedArrayElements(PACKED_ELEMENTS, queue, new_queue, num_tasks); + StoreFixedArrayElement(new_queue, num_tasks, microtask); + FillFixedArrayWithValue(PACKED_ELEMENTS, new_queue, new_num_tasks, + new_queue_length, Heap::kUndefinedValueRootIndex); + SetMicrotaskQueue(new_queue); + Goto(&done); + } + } + + BIND(&if_append); + { + StoreFixedArrayElement(queue, num_tasks, microtask); + Goto(&done); + } + + BIND(&done); + SetPendingMicrotaskCount(new_num_tasks); + Return(UndefinedConstant()); +} + +TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) { + Label init_queue_loop(this); + + Goto(&init_queue_loop); + BIND(&init_queue_loop); + { + TVARIABLE(IntPtrT, index, IntPtrConstant(0)); + Label loop(this, &index); + + TNode<IntPtrT> num_tasks = GetPendingMicrotaskCount(); + ReturnIf(IntPtrEqual(num_tasks, IntPtrConstant(0)), UndefinedConstant()); + + TNode<FixedArray> queue = GetMicrotaskQueue(); + + CSA_ASSERT(this, IntPtrGreaterThanOrEqual( + LoadAndUntagFixedArrayBaseLength(queue), num_tasks)); + CSA_ASSERT(this, IntPtrGreaterThan(num_tasks, IntPtrConstant(0))); + + SetPendingMicrotaskCount(IntPtrConstant(0)); + SetMicrotaskQueue( + TNode<FixedArray>::UncheckedCast(EmptyFixedArrayConstant())); + + Goto(&loop); + BIND(&loop); + { + TNode<HeapObject> microtask = + TNode<HeapObject>::UncheckedCast(LoadFixedArrayElement(queue, index)); + index = IntPtrAdd(index, IntPtrConstant(1)); + + CSA_ASSERT(this, TaggedIsNotSmi(microtask)); + + TNode<Map> microtask_map = LoadMap(microtask); + TNode<Int32T> microtask_type = LoadMapInstanceType(microtask_map); + + Label is_call_handler_info(this); + Label is_function(this); + Label is_promise_resolve_thenable_job(this); + Label is_promise_reaction_job(this); + Label is_unreachable(this); + + int32_t case_values[] = {TUPLE3_TYPE, // CallHandlerInfo + JS_FUNCTION_TYPE, + PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE, + PROMISE_REACTION_JOB_INFO_TYPE}; + + Label* case_labels[] = {&is_call_handler_info, &is_function, + &is_promise_resolve_thenable_job, + &is_promise_reaction_job}; + + static_assert(arraysize(case_values) == arraysize(case_labels), ""); + Switch(microtask_type, &is_unreachable, case_values, case_labels, + arraysize(case_labels)); + + BIND(&is_call_handler_info); + { + // Bailout to C++ slow path for the remainder of the loop. + auto index_ref = + ExternalReference(kMicrotaskQueueBailoutIndexAddress, isolate()); + auto count_ref = + ExternalReference(kMicrotaskQueueBailoutCountAddress, isolate()); + auto rep = kIntSize == 4 ? MachineRepresentation::kWord32 + : MachineRepresentation::kWord64; + + // index was pre-incremented, decrement for bailout to C++. 
+ Node* value = IntPtrSub(index, IntPtrConstant(1)); + + if (kPointerSize == 4) { + DCHECK_EQ(kIntSize, 4); + StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value); + StoreNoWriteBarrier(rep, ExternalConstant(count_ref), num_tasks); + } else { + Node* count = num_tasks; + if (kIntSize == 4) { + value = TruncateInt64ToInt32(value); + count = TruncateInt64ToInt32(count); + } + StoreNoWriteBarrier(rep, ExternalConstant(index_ref), value); + StoreNoWriteBarrier(rep, ExternalConstant(count_ref), count); + } + + Return(queue); + } + + BIND(&is_function); + { + Label cont(this); + VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant()); + TNode<Context> old_context = GetCurrentContext(); + TNode<Context> fn_context = TNode<Context>::UncheckedCast( + LoadObjectField(microtask, JSFunction::kContextOffset)); + TNode<Context> native_context = + TNode<Context>::UncheckedCast(LoadNativeContext(fn_context)); + SetCurrentContext(native_context); + EnterMicrotaskContext(fn_context); + Node* const call = CallJS(CodeFactory::Call(isolate()), native_context, + microtask, UndefinedConstant()); + GotoIfException(call, &cont); + Goto(&cont); + BIND(&cont); + LeaveMicrotaskContext(); + SetCurrentContext(old_context); + Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop); + } + + BIND(&is_promise_resolve_thenable_job); + { + VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant()); + TNode<Context> old_context = GetCurrentContext(); + TNode<Context> microtask_context = + TNode<Context>::UncheckedCast(LoadObjectField( + microtask, PromiseResolveThenableJobInfo::kContextOffset)); + TNode<Context> native_context = + TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context)); + SetCurrentContext(native_context); + EnterMicrotaskContext(microtask_context); + + Label if_unhandled_exception(this), done(this); + Node* const ret = CallBuiltin(Builtins::kPromiseResolveThenableJob, + native_context, microtask); + GotoIfException(ret, &if_unhandled_exception, &exception); + Goto(&done); + + BIND(&if_unhandled_exception); + CallRuntime(Runtime::kReportMessage, native_context, exception.value()); + Goto(&done); + + BIND(&done); + LeaveMicrotaskContext(); + SetCurrentContext(old_context); + + Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop); + } + + BIND(&is_promise_reaction_job); + { + Label if_multiple(this); + Label if_single(this); + + Node* const value = + LoadObjectField(microtask, PromiseReactionJobInfo::kValueOffset); + Node* const tasks = + LoadObjectField(microtask, PromiseReactionJobInfo::kTasksOffset); + Node* const deferred_promises = LoadObjectField( + microtask, PromiseReactionJobInfo::kDeferredPromiseOffset); + Node* const deferred_on_resolves = LoadObjectField( + microtask, PromiseReactionJobInfo::kDeferredOnResolveOffset); + Node* const deferred_on_rejects = LoadObjectField( + microtask, PromiseReactionJobInfo::kDeferredOnRejectOffset); + + TNode<Context> old_context = GetCurrentContext(); + TNode<Context> microtask_context = TNode<Context>::UncheckedCast( + LoadObjectField(microtask, PromiseReactionJobInfo::kContextOffset)); + TNode<Context> native_context = + TNode<Context>::UncheckedCast(LoadNativeContext(microtask_context)); + SetCurrentContext(native_context); + EnterMicrotaskContext(microtask_context); + + Branch(IsFixedArray(deferred_promises), &if_multiple, &if_single); + + BIND(&if_single); + { + CallBuiltin(Builtins::kPromiseHandle, native_context, value, tasks, + deferred_promises, deferred_on_resolves, + 
deferred_on_rejects); + LeaveMicrotaskContext(); + SetCurrentContext(old_context); + Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop); + } + + BIND(&if_multiple); + { + TVARIABLE(IntPtrT, inner_index, IntPtrConstant(0)); + TNode<IntPtrT> inner_length = + LoadAndUntagFixedArrayBaseLength(deferred_promises); + Label inner_loop(this, &inner_index), done(this); + + CSA_ASSERT(this, IntPtrGreaterThan(inner_length, IntPtrConstant(0))); + Goto(&inner_loop); + BIND(&inner_loop); + { + Node* const task = LoadFixedArrayElement(tasks, inner_index); + Node* const deferred_promise = + LoadFixedArrayElement(deferred_promises, inner_index); + Node* const deferred_on_resolve = + LoadFixedArrayElement(deferred_on_resolves, inner_index); + Node* const deferred_on_reject = + LoadFixedArrayElement(deferred_on_rejects, inner_index); + CallBuiltin(Builtins::kPromiseHandle, native_context, value, task, + deferred_promise, deferred_on_resolve, + deferred_on_reject); + inner_index = IntPtrAdd(inner_index, IntPtrConstant(1)); + Branch(IntPtrLessThan(inner_index, inner_length), &inner_loop, + &done); + } + BIND(&done); + + LeaveMicrotaskContext(); + SetCurrentContext(old_context); + + Branch(IntPtrLessThan(index, num_tasks), &loop, &init_queue_loop); + } + } + + BIND(&is_unreachable); + Unreachable(); + } + } +} + +TF_BUILTIN(PromiseResolveThenableJob, InternalBuiltinsAssembler) { + VARIABLE(exception, MachineRepresentation::kTagged, TheHoleConstant()); + Callable call = CodeFactory::Call(isolate()); + Label reject_promise(this, Label::kDeferred); + TNode<PromiseResolveThenableJobInfo> microtask = + TNode<PromiseResolveThenableJobInfo>::UncheckedCast( + Parameter(Descriptor::kMicrotask)); + TNode<Context> context = + TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext)); + + TNode<JSReceiver> thenable = TNode<JSReceiver>::UncheckedCast(LoadObjectField( + microtask, PromiseResolveThenableJobInfo::kThenableOffset)); + TNode<JSReceiver> then = TNode<JSReceiver>::UncheckedCast( + LoadObjectField(microtask, PromiseResolveThenableJobInfo::kThenOffset)); + TNode<JSFunction> resolve = TNode<JSFunction>::UncheckedCast(LoadObjectField( + microtask, PromiseResolveThenableJobInfo::kResolveOffset)); + TNode<JSFunction> reject = TNode<JSFunction>::UncheckedCast( + LoadObjectField(microtask, PromiseResolveThenableJobInfo::kRejectOffset)); + + Node* const result = CallJS(call, context, then, thenable, resolve, reject); + GotoIfException(result, &reject_promise, &exception); + Return(UndefinedConstant()); + + BIND(&reject_promise); + CallJS(call, context, reject, UndefinedConstant(), exception.value()); + Return(UndefinedConstant()); +} + +TF_BUILTIN(AbortJS, CodeStubAssembler) { + Node* message = Parameter(Descriptor::kObject); + Node* reason = SmiConstant(0); + TailCallRuntime(Runtime::kAbortJS, reason, message); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc index 3c7956246b..88641b04e2 100644 --- a/deps/v8/src/builtins/builtins-intl-gen.cc +++ b/deps/v8/src/builtins/builtins-intl-gen.cc @@ -8,7 +8,6 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/code-stub-assembler.h" -#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker. 
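As a point of reference, the queue-growth policy that the EnqueueMicrotask builtin above implements can be restated in plain C++. This is only a sketch: FixedArray::kMaxRegularLength is the V8 constant the CSA code compares against, while the helper names below are ours.

#include <algorithm>
#include <cstddef>

// Sketch of the growth decision in EnqueueMicrotask (see the CSA code above).
// The queue grows to at least 8 slots, otherwise to twice the task count.
size_t NewMicrotaskQueueLength(size_t num_tasks) {
  return std::max<size_t>(8, num_tasks + num_tasks);
}

// Queues at or below FixedArray::kMaxRegularLength stay in new space, where
// no write barriers are needed; anything larger goes to large object space.
bool NeedsLargeObjectSpace(size_t new_length, size_t max_regular_length) {
  return new_length > max_regular_length;
}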
namespace v8 { namespace internal { diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index f186cf2d76..f6a6d85880 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -11,9 +11,10 @@ namespace internal { using compiler::Node; -Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object, - Label* if_exception, - Variable* exception) { +IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context, + Node* object, + Label* if_exception, + Variable* exception) { Node* method = GetProperty(context, object, factory()->iterator_symbol()); GotoIfException(method, if_exception, exception); @@ -21,9 +22,9 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object, Node* iterator = CallJS(callable, context, method, object); GotoIfException(iterator, if_exception, exception); - Label done(this), if_notobject(this, Label::kDeferred); + Label get_next(this), if_notobject(this, Label::kDeferred); GotoIf(TaggedIsSmi(iterator), &if_notobject); - Branch(IsJSReceiver(iterator), &done, &if_notobject); + Branch(IsJSReceiver(iterator), &get_next, &if_notobject); BIND(&if_notobject); { @@ -34,24 +35,21 @@ Node* IteratorBuiltinsAssembler::GetIterator(Node* context, Node* object, Unreachable(); } - BIND(&done); - return iterator; + BIND(&get_next); + Node* const next = GetProperty(context, iterator, factory()->next_string()); + GotoIfException(next, if_exception, exception); + + return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator), + TNode<Object>::UncheckedCast(next)}; } -Node* IteratorBuiltinsAssembler::IteratorStep(Node* context, Node* iterator, - Label* if_done, - Node* fast_iterator_result_map, - Label* if_exception, - Variable* exception) { +Node* IteratorBuiltinsAssembler::IteratorStep( + Node* context, const IteratorRecord& iterator, Label* if_done, + Node* fast_iterator_result_map, Label* if_exception, Variable* exception) { DCHECK_NOT_NULL(if_done); - - // IteratorNext - Node* next_method = GetProperty(context, iterator, factory()->next_string()); - GotoIfException(next_method, if_exception, exception); - // 1. a. Let result be ? Invoke(iterator, "next", « »). Callable callable = CodeFactory::Call(isolate()); - Node* result = CallJS(callable, context, next_method, iterator); + Node* result = CallJS(callable, context, iterator.next, iterator.object); GotoIfException(result, if_exception, exception); // 3. If Type(result) is not Object, throw a TypeError exception. @@ -129,20 +127,20 @@ Node* IteratorBuiltinsAssembler::IteratorValue(Node* context, Node* result, return var_value.value(); } -void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context, - Node* iterator, - Label* if_exception, - Variable* exception) { +void IteratorBuiltinsAssembler::IteratorCloseOnException( + Node* context, const IteratorRecord& iterator, Label* if_exception, + Variable* exception) { // Perform ES #sec-iteratorclose when an exception occurs. This simpler // algorithm does not include redundant steps which are never reachable from // the spec IteratorClose algorithm. DCHECK_NOT_NULL(if_exception); DCHECK_NOT_NULL(exception); CSA_ASSERT(this, IsNotTheHole(exception->value())); - CSA_ASSERT(this, IsJSReceiver(iterator)); + CSA_ASSERT(this, IsJSReceiver(iterator.object)); // Let return be ? GetMethod(iterator, "return"). 
- Node* method = GetProperty(context, iterator, factory()->return_string()); + Node* method = + GetProperty(context, iterator.object, factory()->return_string()); GotoIfException(method, if_exception, exception); // If return is undefined, return Completion(completion). @@ -152,7 +150,7 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context, // Let innerResult be Call(return, iterator, « »). // If an exception occurs, the original exception remains bound Node* inner_result = - CallJS(CodeFactory::Call(isolate()), context, method, iterator); + CallJS(CodeFactory::Call(isolate()), context, method, iterator.object); GotoIfException(inner_result, if_exception, nullptr); // (If completion.[[Type]] is throw) return Completion(completion). @@ -160,9 +158,8 @@ void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context, } } -void IteratorBuiltinsAssembler::IteratorCloseOnException(Node* context, - Node* iterator, - Variable* exception) { +void IteratorBuiltinsAssembler::IteratorCloseOnException( + Node* context, const IteratorRecord& iterator, Variable* exception) { Label rethrow(this, Label::kDeferred); IteratorCloseOnException(context, iterator, &rethrow, exception); diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h index 9eb332e926..42627b8437 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.h +++ b/deps/v8/src/builtins/builtins-iterator-gen.h @@ -19,16 +19,17 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { // https://tc39.github.io/ecma262/#sec-getiterator --- never used for // @@asyncIterator. - Node* GetIterator(Node* context, Node* object, Label* if_exception = nullptr, - Variable* exception = nullptr); + IteratorRecord GetIterator(Node* context, Node* object, + Label* if_exception = nullptr, + Variable* exception = nullptr); // https://tc39.github.io/ecma262/#sec-iteratorstep // Returns `false` if the iterator is done, otherwise returns an // iterator result. // `fast_iterator_result_map` refers to the map for the JSIteratorResult // object, loaded from the native context. - Node* IteratorStep(Node* context, Node* iterator, Label* if_done, - Node* fast_iterator_result_map = nullptr, + Node* IteratorStep(Node* context, const IteratorRecord& iterator, + Label* if_done, Node* fast_iterator_result_map = nullptr, Label* if_exception = nullptr, Variable* exception = nullptr); @@ -42,9 +43,9 @@ class IteratorBuiltinsAssembler : public CodeStubAssembler { Variable* exception = nullptr); // https://tc39.github.io/ecma262/#sec-iteratorclose - void IteratorCloseOnException(Node* context, Node* iterator, + void IteratorCloseOnException(Node* context, const IteratorRecord& iterator, Label* if_exception, Variable* exception); - void IteratorCloseOnException(Node* context, Node* iterator, + void IteratorCloseOnException(Node* context, const IteratorRecord& iterator, Variable* exception); }; diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc index 706fa4f3a8..d588113cdd 100644 --- a/deps/v8/src/builtins/builtins-math-gen.cc +++ b/deps/v8/src/builtins/builtins-math-gen.cc @@ -8,7 +8,6 @@ #include "src/builtins/builtins.h" #include "src/code-factory.h" #include "src/code-stub-assembler.h" -#include "src/zone/zone-list-inl.h" // TODO(mstarzinger): Temporary cycle breaker. 
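The iterator refactoring above replaces bare iterator nodes with a spec-style record. The shape that GetIterator now returns and IteratorStep consumes is, in essence, the following (a sketch assembled from the diff; TNode is V8's typed CSA handle, so this only compiles inside V8):

// The iterator object paired with its 'next' method, looked up exactly once.
struct IteratorRecord {
  TNode<JSReceiver> object;  // the iterator itself
  TNode<Object> next;        // cached iterator.next, reused by IteratorStep
};

Caching next up front matches the spec's Iterator Record (which carries [[NextMethod]]) and saves one property load per IteratorStep call.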
namespace v8 { namespace internal { @@ -162,7 +161,7 @@ void MathBuiltinsAssembler::MathMaxMin( SloppyTNode<Float64T>), double default_val) { CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc)); - argc = arguments.GetLength(); + argc = arguments.GetLength(INTPTR_PARAMETERS); VARIABLE(result, MachineRepresentation::kFloat64); result.Bind(Float64Constant(default_val)); diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 65170d321d..9e344820dc 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -16,6 +16,8 @@ namespace internal { // ES6 section 19.1 Object Objects typedef compiler::Node Node; +template <class T> +using TNode = CodeStubAssembler::TNode<T>; class ObjectBuiltinsAssembler : public CodeStubAssembler { public: @@ -34,6 +36,46 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler { Node* ConstructDataDescriptor(Node* context, Node* value, Node* writable, Node* enumerable, Node* configurable); Node* GetAccessorOrUndefined(Node* accessor, Label* if_bailout); + + Node* IsSpecialReceiverMap(SloppyTNode<Map> map); +}; + +class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler { + public: + explicit ObjectEntriesValuesBuiltinsAssembler( + compiler::CodeAssemblerState* state) + : ObjectBuiltinsAssembler(state) {} + + protected: + enum CollectType { kEntries, kValues }; + + TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map); + + TNode<BoolT> IsPropertyEnumerable(TNode<Uint32T> details); + + TNode<BoolT> IsPropertyKindAccessor(TNode<Uint32T> kind); + + TNode<BoolT> IsPropertyKindData(TNode<Uint32T> kind); + + TNode<Uint32T> HasHiddenPrototype(TNode<Map> map); + + TNode<Uint32T> LoadPropertyKind(TNode<Uint32T> details) { + return DecodeWord32<PropertyDetails::KindField>(details); + } + + void GetOwnValuesOrEntries(TNode<Context> context, TNode<Object> maybe_object, + CollectType collect_type); + + void GotoIfMapHasSlowProperties(TNode<Map> map, Label* if_slow); + + TNode<JSArray> FastGetOwnValuesOrEntries( + TNode<Context> context, TNode<JSObject> object, + Label* if_call_runtime_with_fast_path, Label* if_no_properties, + CollectType collect_type); + + TNode<JSArray> FinalizeValuesOrEntriesJSArray( + TNode<Context> context, TNode<FixedArray> values_or_entries, + TNode<IntPtrT> size, TNode<Map> array_map, Label* if_empty); }; void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context, @@ -97,6 +139,265 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context, return js_desc; } +Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) { + CSA_SLOW_ASSERT(this, IsMap(map)); + Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map)); + uint32_t mask = + Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask; + USE(mask); + // Interceptors or access checks imply special receiver. 
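+ // The assert below encodes that implication: if either mask bit is set,
+ // is_special must already be true; otherwise SelectConstant yields the
+ // constant 1 and the check passes vacuously.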
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+TNode<Word32T>
+ObjectEntriesValuesBuiltinsAssembler::IsStringWrapperElementsKind(
+ TNode<Map> map) {
+ Node* kind = LoadMapElementsKind(map);
+ return Word32Or(
+ Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)),
+ Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS)));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable(
+ TNode<Uint32T> details) {
+ TNode<Uint32T> attributes =
+ DecodeWord32<PropertyDetails::AttributesField>(details);
+ return IsNotSetWord32(attributes, PropertyAttributes::DONT_ENUM);
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kAccessor));
+}
+
+TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
+ TNode<Uint32T> kind) {
+ return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+}
+
+TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
+ TNode<Map> map) {
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
+ TNode<Context> context, TNode<Object> maybe_object,
+ CollectType collect_type) {
+ TNode<JSObject> object = TNode<JSObject>::UncheckedCast(
+ CallBuiltin(Builtins::kToObject, context, maybe_object));
+
+ Label if_call_runtime_with_fast_path(this, Label::kDeferred),
+ if_call_runtime(this, Label::kDeferred),
+ if_no_properties(this, Label::kDeferred);
+
+ TNode<Map> map = LoadMap(object);
+ GotoIfNot(IsJSObjectMap(map), &if_call_runtime);
+ GotoIfMapHasSlowProperties(map, &if_call_runtime);
+
+ TNode<FixedArrayBase> elements = LoadElements(object);
+ // If the object has elements, we treat it as the slow case and go to
+ // the runtime call.
+ GotoIfNot(IsEmptyFixedArray(elements), &if_call_runtime_with_fast_path);
+
+ TNode<JSArray> result = FastGetOwnValuesOrEntries(
+ context, object, &if_call_runtime_with_fast_path, &if_no_properties,
+ collect_type);
+ Return(result);
+
+ BIND(&if_no_properties);
+ {
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ Node* empty_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+ Return(empty_array);
+ }
+
+ BIND(&if_call_runtime_with_fast_path);
+ {
+ // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntries, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValues, context, object));
+ }
+ }
+
+ BIND(&if_call_runtime);
+ {
+ // In the slow case, we simply call into the runtime.
+ if (collect_type == CollectType::kEntries) {
+ Return(CallRuntime(Runtime::kObjectEntriesSkipFastPath, context, object));
+ } else {
+ DCHECK(collect_type == CollectType::kValues);
+ Return(CallRuntime(Runtime::kObjectValuesSkipFastPath, context, object));
+ }
+ }
+}
+
+void ObjectEntriesValuesBuiltinsAssembler::GotoIfMapHasSlowProperties(
+ TNode<Map> map, Label* if_slow) {
+ GotoIf(IsStringWrapperElementsKind(map), if_slow);
+ GotoIf(IsSpecialReceiverMap(map), if_slow);
+ GotoIf(HasHiddenPrototype(map), if_slow);
+ GotoIf(IsDictionaryMap(map), if_slow);
+}
+
+TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
+ TNode<Context> context, TNode<JSObject> object,
+ Label* if_call_runtime_with_fast_path, Label* if_no_properties,
+ CollectType collect_type) {
+ Node* native_context = LoadNativeContext(context);
+ TNode<Map> array_map =
+ LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context);
+ TNode<Map> map = LoadMap(object);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+
+ Label if_has_enum_cache(this), if_not_has_enum_cache(this),
+ collect_entries(this);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
+ Node* has_enum_cache = WordNotEqual(
+ object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel));
+
+ // If we found an enum cache on the object, we use its length as the
+ // result array length, because it matches the length of the
+ // Object.entries/values result and is cheaper to read than the
+ // NumberOfOwnDescriptorsBits value.
+ // If no enum cache is found, we call into the runtime, which initializes
+ // the enum cache for subsequent calls on the CSA fast path.
+ Branch(has_enum_cache, &if_has_enum_cache, if_call_runtime_with_fast_path);
+
+ BIND(&if_has_enum_cache);
+ {
+ GotoIf(WordEqual(object_enum_length, IntPtrConstant(0)), if_no_properties);
+ TNode<FixedArray> values_or_entries = TNode<FixedArray>::UncheckedCast(
+ AllocateFixedArray(PACKED_ELEMENTS, object_enum_length,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation));
+
+ // Even with an enum cache we cannot tell whether the object has an
+ // accessor without looping through the descriptors. If it does, we must
+ // jump to the runtime call, which would leave part of the FixedArray
+ // uninitialized; the array is therefore pre-filled with the-hole.
+ FillFixedArrayWithValue(PACKED_ELEMENTS, values_or_entries,
+ IntPtrConstant(0), object_enum_length,
+ Heap::kTheHoleValueRootIndex);
+
+ TVARIABLE(IntPtrT, var_result_index, IntPtrConstant(0));
+ TVARIABLE(IntPtrT, var_descriptor_index, IntPtrConstant(0));
+ Variable* vars[] = {&var_descriptor_index, &var_result_index};
+ // Let desc be ? O.[[GetOwnProperty]](key).
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ Label loop(this, 2, vars), after_loop(this), loop_condition(this);
+ Branch(IntPtrEqual(var_descriptor_index, object_enum_length), &after_loop,
+ &loop);
+
+ // We don't use BuildFastLoop; a hand-written loop is used instead
+ // because we need 'continue' semantics.
+ BIND(&loop);
+ {
+ // We never invoke getters here, so the map cannot change.
+ CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
+ TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
+ TruncateWordToWord32(var_descriptor_index));
+ Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+
+ // Skip Symbols.
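+ // Object.entries/values reflect only enumerable string-keyed own
+ // properties (ES #sec-object.entries), so symbol keys are filtered out.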
+ GotoIf(IsSymbol(next_key), &loop_condition); + + TNode<Uint32T> details = TNode<Uint32T>::UncheckedCast( + DescriptorArrayGetDetails(descriptors, descriptor_index)); + TNode<Uint32T> kind = LoadPropertyKind(details); + + // If property is accessor, we escape fast path and call runtime. + GotoIf(IsPropertyKindAccessor(kind), if_call_runtime_with_fast_path); + CSA_ASSERT(this, IsPropertyKindData(kind)); + + // If desc is not undefined and desc.[[Enumerable]] is true, then + GotoIfNot(IsPropertyEnumerable(details), &loop_condition); + + VARIABLE(var_property_value, MachineRepresentation::kTagged, + UndefinedConstant()); + Node* descriptor_name_index = DescriptorNumberToIndex(descriptor_index); + + // Let value be ? Get(O, key). + LoadPropertyFromFastObject(object, map, descriptors, + descriptor_name_index, details, + &var_property_value); + + // If kind is "value", append value to properties. + Node* value = var_property_value.value(); + + if (collect_type == CollectType::kEntries) { + // Let entry be CreateArrayFromList(« key, value »). + Node* array = nullptr; + Node* elements = nullptr; + std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( + PACKED_ELEMENTS, array_map, SmiConstant(2), nullptr, + IntPtrConstant(2)); + StoreFixedArrayElement(elements, 0, next_key, SKIP_WRITE_BARRIER); + StoreFixedArrayElement(elements, 1, value, SKIP_WRITE_BARRIER); + value = array; + } + + StoreFixedArrayElement(values_or_entries, var_result_index, value); + Increment(&var_result_index, 1); + Goto(&loop_condition); + + BIND(&loop_condition); + { + Increment(&var_descriptor_index, 1); + Branch(IntPtrEqual(var_descriptor_index, object_enum_length), + &after_loop, &loop); + } + } + BIND(&after_loop); + return FinalizeValuesOrEntriesJSArray(context, values_or_entries, + var_result_index, array_map, + if_no_properties); + } +} + +TNode<JSArray> +ObjectEntriesValuesBuiltinsAssembler::FinalizeValuesOrEntriesJSArray( + TNode<Context> context, TNode<FixedArray> result, TNode<IntPtrT> size, + TNode<Map> array_map, Label* if_empty) { + CSA_ASSERT(this, IsJSArrayMap(array_map)); + + GotoIf(IntPtrEqual(size, IntPtrConstant(0)), if_empty); + Node* array = AllocateUninitializedJSArrayWithoutElements( + array_map, SmiTag(size), nullptr); + StoreObjectField(array, JSArray::kElementsOffset, result); + return TNode<JSArray>::UncheckedCast(array); +} + +TF_BUILTIN(ObjectPrototypeToLocaleString, CodeStubAssembler) { + TNode<Context> context = CAST(Parameter(Descriptor::kContext)); + TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver)); + + Label if_null_or_undefined(this, Label::kDeferred); + GotoIf(IsNullOrUndefined(receiver), &if_null_or_undefined); + + TNode<Object> method = + CAST(GetProperty(context, receiver, factory()->toString_string())); + Return(CallJS(CodeFactory::Call(isolate()), context, method, receiver)); + + BIND(&if_null_or_undefined); + ThrowTypeError(context, MessageTemplate::kCalledOnNullOrUndefined, + "Object.prototype.toLocaleString"); +} + TF_BUILTIN(ObjectPrototypeHasOwnProperty, ObjectBuiltinsAssembler) { Node* object = Parameter(Descriptor::kReceiver); Node* key = Parameter(Descriptor::kKey); @@ -250,6 +551,22 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { } } +TF_BUILTIN(ObjectValues, ObjectEntriesValuesBuiltinsAssembler) { + TNode<JSObject> object = + TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject)); + TNode<Context> context = + TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext)); + GetOwnValuesOrEntries(context, object, 
CollectType::kValues); +} + +TF_BUILTIN(ObjectEntries, ObjectEntriesValuesBuiltinsAssembler) { + TNode<JSObject> object = + TNode<JSObject>::UncheckedCast(Parameter(Descriptor::kObject)); + TNode<Context> context = + TNode<Context>::UncheckedCast(Parameter(Descriptor::kContext)); + GetOwnValuesOrEntries(context, object, CollectType::kEntries); +} + // ES #sec-object.prototype.isprototypeof TF_BUILTIN(ObjectPrototypeIsPrototypeOf, ObjectBuiltinsAssembler) { Node* receiver = Parameter(Descriptor::kReceiver); @@ -550,7 +867,7 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) { GotoIf(IsNull(holder), &return_default); Node* holder_map = LoadMap(holder); Node* holder_bit_field3 = LoadMapBitField3(holder_map); - GotoIf(IsSetWord32<Map::MayHaveInterestingSymbols>(holder_bit_field3), + GotoIf(IsSetWord32<Map::MayHaveInterestingSymbolsBit>(holder_bit_field3), &return_generic); var_holder.Bind(LoadMapPrototype(holder_map)); Goto(&loop); @@ -615,7 +932,7 @@ TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) { &call_runtime); // Handle dictionary objects or fast objects with properties in runtime. Node* bit_field3 = LoadMapBitField3(properties_map); - GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime); + GotoIf(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &call_runtime); Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3), &call_runtime, &no_properties); } diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc index 36f7ebfc0a..4e353b9260 100644 --- a/deps/v8/src/builtins/builtins-object.cc +++ b/deps/v8/src/builtins/builtins-object.cc @@ -395,31 +395,6 @@ BUILTIN(ObjectIsSealed) { return isolate->heap()->ToBoolean(result.FromJust()); } -BUILTIN(ObjectValues) { - HandleScope scope(isolate); - Handle<Object> object = args.atOrUndefined(isolate, 1); - Handle<JSReceiver> receiver; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver, - Object::ToObject(isolate, object)); - Handle<FixedArray> values; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS)); - return *isolate->factory()->NewJSArrayWithElements(values); -} - -BUILTIN(ObjectEntries) { - HandleScope scope(isolate); - Handle<Object> object = args.atOrUndefined(isolate, 1); - Handle<JSReceiver> receiver; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver, - Object::ToObject(isolate, object)); - Handle<FixedArray> entries; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, entries, - JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS)); - return *isolate->factory()->NewJSArrayWithElements(entries); -} - BUILTIN(ObjectGetOwnPropertyDescriptors) { HandleScope scope(isolate); Handle<Object> object = args.atOrUndefined(isolate, 1); diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc index 67ebc85ba4..1a3ebcd892 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.cc +++ b/deps/v8/src/builtins/builtins-promise-gen.cc @@ -161,12 +161,12 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context, Node* resolve = LoadObjectField(capability, PromiseCapability::kResolveOffset); GotoIf(TaggedIsSmi(resolve), &if_notcallable); - GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable); + GotoIfNot(IsCallable(resolve), &if_notcallable); Node* reject = LoadObjectField(capability, PromiseCapability::kRejectOffset); GotoIf(TaggedIsSmi(reject), &if_notcallable); - GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable); + 
GotoIfNot(IsCallable(reject), &if_notcallable); StoreObjectField(capability, PromiseCapability::kPromiseOffset, promise); @@ -189,25 +189,6 @@ Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context, return var_result.value(); } -void PromiseBuiltinsAssembler::InitializeFunctionContext(Node* native_context, - Node* context, - int slots) { - DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); - StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex); - StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset, - SmiConstant(slots)); - - Node* const empty_fn = - LoadContextElement(native_context, Context::CLOSURE_INDEX); - StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn); - StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX, - UndefinedConstant()); - StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX, - TheHoleConstant()); - StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX, - native_context); -} - Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context, int slots) { DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); @@ -366,8 +347,6 @@ Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context, VARIABLE(var_deferred_on_resolve, MachineRepresentation::kTagged); VARIABLE(var_deferred_on_reject, MachineRepresentation::kTagged); - GotoIfForceSlowPath(&promise_capability); - Branch(WordEqual(promise_fun, constructor), &fast_promise_capability, &promise_capability); @@ -415,16 +394,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen( append_callbacks(this); GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable); - Isolate* isolate = this->isolate(); - Node* const on_resolve_map = LoadMap(on_resolve); - Branch(IsCallableMap(on_resolve_map), &onrejectcheck, - &if_onresolvenotcallable); + Branch(IsCallable(on_resolve), &onrejectcheck, &if_onresolvenotcallable); BIND(&if_onresolvenotcallable); { - Node* const default_resolve_handler_symbol = HeapConstant( - isolate->factory()->promise_default_resolve_handler_symbol()); - var_on_resolve.Bind(default_resolve_handler_symbol); + var_on_resolve.Bind(PromiseDefaultResolveHandlerSymbolConstant()); Goto(&onrejectcheck); } @@ -433,15 +407,11 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen( Label if_onrejectnotcallable(this); GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable); - Node* const on_reject_map = LoadMap(on_reject); - Branch(IsCallableMap(on_reject_map), &append_callbacks, - &if_onrejectnotcallable); + Branch(IsCallable(on_reject), &append_callbacks, &if_onrejectnotcallable); BIND(&if_onrejectnotcallable); { - Node* const default_reject_handler_symbol = HeapConstant( - isolate->factory()->promise_default_reject_handler_symbol()); - var_on_reject.Bind(default_reject_handler_symbol); + var_on_reject.Bind(PromiseDefaultRejectHandlerSymbolConstant()); Goto(&append_callbacks); } } @@ -558,8 +528,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen( Node* info = AllocatePromiseReactionJobInfo( result, var_on_resolve.value(), deferred_promise, deferred_on_resolve, deferred_on_reject, context); - // TODO(gsathya): Move this to TF - CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info); + CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info); Goto(&out); BIND(&reject); @@ -578,8 +547,7 @@ Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen( Node* info = AllocatePromiseReactionJobInfo( result, var_on_reject.value(), deferred_promise, deferred_on_resolve, 
deferred_on_reject, context); - // TODO(gsathya): Move this to TF - CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info); + CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info); Goto(&out); } } @@ -787,8 +755,7 @@ void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context, // 12. Perform EnqueueJob("PromiseJobs", // PromiseResolveThenableJob, « promise, resolution, thenAction»). BIND(&enqueue); - // TODO(gsathya): Move this to TF - CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info); + CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info); Goto(&out); } @@ -846,7 +813,7 @@ void PromiseBuiltinsAssembler::PromiseFulfill( result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject, context); - CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info); + CallBuiltin(Builtins::kEnqueueMicrotask, NoContextConstant(), info); Goto(&do_promisereset); BIND(&do_promisereset); @@ -1080,19 +1047,18 @@ TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) { Node *resolve, *reject; std::tie(resolve, reject) = CreatePromiseResolvingFunctions( var_result.value(), TrueConstant(), native_context); - Callable call_callable = CodeFactory::Call(isolate); - Node* const maybe_exception = CallJS(call_callable, context, executor, - UndefinedConstant(), resolve, reject); + Node* const maybe_exception = CallJS( + CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), + context, executor, UndefinedConstant(), resolve, reject); GotoIfException(maybe_exception, &if_rejectpromise, &var_reason); Branch(is_debug_active, &debug_pop, &out); BIND(&if_rejectpromise); { - Callable call_callable = CodeFactory::Call(isolate); - CallJS(call_callable, context, reject, UndefinedConstant(), - var_reason.value()); + CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), + context, reject, UndefinedConstant(), var_reason.value()); Branch(is_debug_active, &debug_pop, &out); } @@ -1130,8 +1096,8 @@ TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) { } // ES#sec-promise.prototype.then -// Promise.prototype.catch ( onFulfilled, onRejected ) -TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) { +// Promise.prototype.then ( onFulfilled, onRejected ) +TF_BUILTIN(PromisePrototypeThen, PromiseBuiltinsAssembler) { // 1. Let promise be the this value. 
Node* const promise = Parameter(Descriptor::kReceiver); Node* const on_resolve = Parameter(Descriptor::kOnFullfilled); @@ -1169,7 +1135,6 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) { Node* const exception = Parameter(Descriptor::kException); Node* const context = Parameter(Descriptor::kContext); - Callable call_callable = CodeFactory::Call(isolate()); VARIABLE(var_unused, MachineRepresentation::kTagged); Label if_internalhandler(this), if_customhandler(this, Label::kDeferred); @@ -1183,7 +1148,15 @@ TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) { BIND(&if_customhandler); { - CallJS(call_callable, context, on_reject, UndefinedConstant(), exception); + VARIABLE(var_exception, MachineRepresentation::kTagged, TheHoleConstant()); + Label if_exception(this); + Node* const ret = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, on_reject, UndefinedConstant(), exception); + GotoIfException(ret, &if_exception, &var_exception); + Return(UndefinedConstant()); + BIND(&if_exception); + CallRuntime(Runtime::kReportMessage, context, var_exception.value()); Return(UndefinedConstant()); } } @@ -1225,9 +1198,7 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) { BIND(&if_defaulthandler); { Label if_resolve(this), if_reject(this); - Node* const default_resolve_handler_symbol = HeapConstant( - isolate->factory()->promise_default_resolve_handler_symbol()); - Branch(WordEqual(default_resolve_handler_symbol, handler), &if_resolve, + Branch(IsPromiseDefaultResolveHandlerSymbol(handler), &if_resolve, &if_reject); BIND(&if_resolve); @@ -1246,9 +1217,9 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) { BIND(&if_callablehandler); { - Callable call_callable = CodeFactory::Call(isolate); - Node* const result = - CallJS(call_callable, context, handler, UndefinedConstant(), value); + Node* const result = CallJS( + CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), + context, handler, UndefinedConstant(), value); var_result.Bind(result); GotoIfException(result, &if_rejectpromise, &var_reason); Branch(IsUndefined(deferred_on_resolve), &if_internalhandler, @@ -1261,10 +1232,10 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) { BIND(&if_customhandler); { - Callable call_callable = CodeFactory::Call(isolate); - Node* const maybe_exception = - CallJS(call_callable, context, deferred_on_resolve, - UndefinedConstant(), var_result.value()); + Node* const maybe_exception = CallJS( + CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), + context, deferred_on_resolve, UndefinedConstant(), + var_result.value()); GotoIfException(maybe_exception, &if_rejectpromise, &var_reason); Goto(&promisehook_after); } @@ -1297,9 +1268,23 @@ TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) { } } +TF_BUILTIN(PromiseHandleJS, PromiseBuiltinsAssembler) { + Node* const value = Parameter(Descriptor::kValue); + Node* const handler = Parameter(Descriptor::kHandler); + Node* const deferred_promise = Parameter(Descriptor::kDeferredPromise); + Node* const deferred_on_resolve = Parameter(Descriptor::kDeferredOnResolve); + Node* const deferred_on_reject = Parameter(Descriptor::kDeferredOnReject); + Node* const context = Parameter(Descriptor::kContext); + + Node* const result = + CallBuiltin(Builtins::kPromiseHandle, context, value, handler, + deferred_promise, deferred_on_resolve, deferred_on_reject); + Return(result); +} + // ES#sec-promise.prototype.catch // Promise.prototype.catch ( onRejected ) -TF_BUILTIN(PromiseCatch, 
PromiseBuiltinsAssembler) { +TF_BUILTIN(PromisePrototypeCatch, PromiseBuiltinsAssembler) { // 1. Let promise be the this value. Node* const promise = Parameter(Descriptor::kReceiver); Node* const on_resolve = UndefinedConstant(); @@ -1321,9 +1306,9 @@ TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) { { Node* const then = GetProperty(context, promise, isolate()->factory()->then_string()); - Callable call_callable = CodeFactory::Call(isolate()); - Node* const result = - CallJS(call_callable, context, then, promise, on_resolve, on_reject); + Node* const result = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, then, promise, on_resolve, on_reject); Return(result); } } @@ -1407,10 +1392,10 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) { { Node* const capability = NewPromiseCapability(context, constructor); - Callable call_callable = CodeFactory::Call(isolate); Node* const resolve = LoadObjectField(capability, PromiseCapability::kResolveOffset); - CallJS(call_callable, context, resolve, UndefinedConstant(), value); + CallJS(CodeFactory::Call(isolate, ConvertReceiverMode::kNullOrUndefined), + context, resolve, UndefinedConstant(), value); Node* const result = LoadObjectField(capability, PromiseCapability::kPromiseOffset); @@ -1468,8 +1453,6 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { Label if_nativepromise(this), if_custompromise(this, Label::kDeferred); Node* const native_context = LoadNativeContext(context); - GotoIfForceSlowPath(&if_custompromise); - Node* const promise_fun = LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX); Branch(WordEqual(promise_fun, receiver), &if_nativepromise, @@ -1492,8 +1475,8 @@ TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) { // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »). Node* const reject = LoadObjectField(capability, PromiseCapability::kRejectOffset); - Callable call_callable = CodeFactory::Call(isolate()); - CallJS(call_callable, context, reject, UndefinedConstant(), reason); + CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, reject, UndefinedConstant(), reason); // 5. Return promiseCapability.[[Promise]]. Node* const promise = @@ -1567,9 +1550,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) { CSA_ASSERT(this, IsCallable(on_finally)); // 3. Let result be ? Call(onFinally). - Callable call_callable = CodeFactory::Call(isolate()); - Node* const result = - CallJS(call_callable, context, on_finally, UndefinedConstant()); + Node* const result = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, on_finally, UndefinedConstant()); // 4. Let C be F.[[Constructor]]. Node* const constructor = LoadContextElement(context, kConstructorSlot); @@ -1588,8 +1571,9 @@ TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) { // 8. Return ? Invoke(promise, "then", « valueThunk »). Node* const promise_then = GetProperty(context, promise, factory()->then_string()); - Node* const result_promise = CallJS(call_callable, context, - promise_then, promise, value_thunk); + Node* const result_promise = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, promise_then, promise, value_thunk); Return(result_promise); } @@ -1628,9 +1612,9 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) { CSA_ASSERT(this, IsCallable(on_finally)); // 3. Let result be ? Call(onFinally). 
- Callable call_callable = CodeFactory::Call(isolate()); - Node* result = - CallJS(call_callable, context, on_finally, UndefinedConstant()); + Node* result = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, on_finally, UndefinedConstant()); // 4. Let C be F.[[Constructor]]. Node* const constructor = LoadContextElement(context, kConstructorSlot); @@ -1649,12 +1633,13 @@ TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) { // 8. Return ? Invoke(promise, "then", « thrower »). Node* const promise_then = GetProperty(context, promise, factory()->then_string()); - Node* const result_promise = CallJS(call_callable, context, - promise_then, promise, thrower); + Node* const result_promise = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, promise_then, promise, thrower); Return(result_promise); } -TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) { +TF_BUILTIN(PromisePrototypeFinally, PromiseBuiltinsAssembler) { CSA_ASSERT_JS_ARGC_EQ(this, 1); // 1. Let promise be the this value. @@ -1662,9 +1647,9 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) { Node* const on_finally = Parameter(Descriptor::kOnFinally); Node* const context = Parameter(Descriptor::kContext); - // 2. If IsPromise(promise) is false, throw a TypeError exception. - ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE, - "Promise.prototype.finally"); + // 2. If Type(promise) is not Object, throw a TypeError exception. + ThrowIfNotJSReceiver(context, promise, MessageTemplate::kCalledOnNonObject, + "Promise.prototype.finally"); // 3. Let C be ? SpeciesConstructor(promise, %Promise%). Node* const native_context = LoadNativeContext(context); @@ -1714,9 +1699,10 @@ TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) { BIND(&perform_finally); Node* const promise_then = GetProperty(context, promise, factory()->then_string()); - Node* const result_promise = - CallJS(CodeFactory::Call(isolate()), context, promise_then, promise, - var_then_finally.value(), var_catch_finally.value()); + Node* const result_promise = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, promise_then, promise, var_then_finally.value(), + var_catch_finally.value()); Return(result_promise); } @@ -1758,8 +1744,9 @@ TF_BUILTIN(PerformNativePromiseThen, PromiseBuiltinsAssembler) { } Node* PromiseBuiltinsAssembler::PerformPromiseAll( - Node* context, Node* constructor, Node* capability, Node* iterator, - Label* if_exception, Variable* var_exception) { + Node* context, Node* constructor, Node* capability, + const IteratorRecord& iterator, Label* if_exception, + Variable* var_exception) { IteratorBuiltinsAssembler iter_assembler(state()); Label close_iterator(this); @@ -1805,8 +1792,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( GetProperty(context, constructor, factory()->resolve_string()); GotoIfException(promise_resolve, &close_iterator, var_exception); - Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context, - promise_resolve, constructor, next_value); + Node* const next_promise = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, promise_resolve, constructor, next_value); GotoIfException(next_promise, &close_iterator, var_exception); // Let resolveElement be a new built-in function object as defined in @@ -1844,7 +1832,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( BIND(&if_outofrange); { // If the incremented value is out of Smi range, crash. 
- Abort(kOffsetOutOfRange); + Abort(AbortReason::kOffsetOutOfRange); } BIND(&done); @@ -1857,7 +1845,8 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( GotoIfException(then, &close_iterator, var_exception); Node* const then_call = CallJS( - CodeFactory::Call(isolate()), context, then, next_promise, resolve, + CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined), + context, then, next_promise, resolve, LoadObjectField(capability, PromiseCapability::kRejectOffset)); GotoIfException(then_call, &close_iterator, var_exception); @@ -1899,9 +1888,9 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll( Node* const resolve = LoadObjectField(capability, PromiseCapability::kResolveOffset); - Node* const resolve_call = - CallJS(CodeFactory::Call(isolate()), context, resolve, - UndefinedConstant(), values_array); + Node* const resolve_call = CallJS( + CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, resolve, UndefinedConstant(), values_array); GotoIfException(resolve_call, if_exception, var_exception); Goto(&return_promise); @@ -1963,7 +1952,7 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) { // Let iterator be GetIterator(iterable). // IfAbruptRejectPromise(iterator, promiseCapability). Node* const iterable = Parameter(Descriptor::kIterable); - Node* const iterator = iter_assembler.GetIterator( + IteratorRecord iterator = iter_assembler.GetIterator( context, iterable, &reject_promise, &var_exception); // Let result be PerformPromiseAll(iteratorRecord, C, promiseCapability). @@ -1982,9 +1971,8 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) { CSA_SLOW_ASSERT(this, IsNotTheHole(var_exception.value())); Node* const reject = LoadObjectField(capability, PromiseCapability::kRejectOffset); - Callable callable = CodeFactory::Call(isolate()); - CallJS(callable, context, reject, UndefinedConstant(), - var_exception.value()); + CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, reject, UndefinedConstant(), var_exception.value()); Node* const promise = LoadObjectField(capability, PromiseCapability::kPromiseOffset); @@ -2059,8 +2047,8 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) { LoadContextElement(context, kPromiseAllResolveElementCapabilitySlot); Node* const resolve = LoadObjectField(capability, PromiseCapability::kResolveOffset); - CallJS(CodeFactory::Call(isolate()), context, resolve, UndefinedConstant(), - values_array); + CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, resolve, UndefinedConstant(), values_array); Return(UndefinedConstant()); BIND(&already_called); @@ -2101,7 +2089,7 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { // Let iterator be GetIterator(iterable). // IfAbruptRejectPromise(iterator, promiseCapability). Node* const iterable = Parameter(Descriptor::kIterable); - Node* const iterator = iter_assembler.GetIterator( + IteratorRecord iterator = iter_assembler.GetIterator( context, iterable, &reject_promise, &var_exception); // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability). 
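One pattern recurs throughout the promise changes above: bare CodeFactory::Call(isolate()) sites now state what is statically known about the receiver. ConvertReceiverMode is the real V8 enum; the snippet below merely restates the pattern with illustrative operands taken from the surrounding hunks.

// When the receiver is statically known, the Call stub can skip its
// receiver conversion checks:
//   kNullOrUndefined    - receiver is known to be null/undefined
//   kNotNullOrUndefined - receiver is known to be neither
//   kAny                - nothing is known; test at run time (old default)
Node* const result = CallJS(
    CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
    context, handler, UndefinedConstant(), value);

Hence kNullOrUndefined wherever a callback is invoked with UndefinedConstant() as receiver, and kNotNullOrUndefined where then is invoked on a promise object.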
@@ -2134,8 +2122,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { GetProperty(context, receiver, factory()->resolve_string()); GotoIfException(promise_resolve, &close_iterator, &var_exception); - Node* const next_promise = CallJS(CodeFactory::Call(isolate()), context, - promise_resolve, receiver, next_value); + Node* const next_promise = + CallJS(CodeFactory::Call(isolate(), + ConvertReceiverMode::kNotNullOrUndefined), + context, promise_resolve, receiver, next_value); GotoIfException(next_promise, &close_iterator, &var_exception); // Perform ? Invoke(nextPromise, "then", « resolveElement, @@ -2144,8 +2134,10 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { GetProperty(context, next_promise, factory()->then_string()); GotoIfException(then, &close_iterator, &var_exception); - Node* const then_call = CallJS(CodeFactory::Call(isolate()), context, - then, next_promise, resolve, reject); + Node* const then_call = + CallJS(CodeFactory::Call(isolate(), + ConvertReceiverMode::kNotNullOrUndefined), + context, then, next_promise, resolve, reject); GotoIfException(then_call, &close_iterator, &var_exception); // For catch prediction, mark that rejections here are semantically @@ -2172,9 +2164,8 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) { { Node* const reject = LoadObjectField(capability, PromiseCapability::kRejectOffset); - Callable callable = CodeFactory::Call(isolate()); - CallJS(callable, context, reject, UndefinedConstant(), - var_exception.value()); + CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined), + context, reject, UndefinedConstant(), var_exception.value()); Node* const promise = LoadObjectField(capability, PromiseCapability::kPromiseOffset); diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h index 759176757f..366c7c22cd 100644 --- a/deps/v8/src/builtins/builtins-promise-gen.h +++ b/deps/v8/src/builtins/builtins-promise-gen.h @@ -137,7 +137,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler { void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise, Label* if_isunmodified, Label* if_ismodified); - void InitializeFunctionContext(Node* native_context, Node* context, int len); Node* CreatePromiseContext(Node* native_context, int slots); void PromiseFulfill(Node* context, Node* promise, Node* result, v8::Promise::PromiseState status); @@ -158,7 +157,7 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler { Node* CreateThrowerFunction(Node* reason, Node* native_context); Node* PerformPromiseAll(Node* context, Node* constructor, Node* capability, - Node* iterator, Label* if_exception, + const IteratorRecord& record, Label* if_exception, Variable* var_exception); Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr); diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index 2d81867d51..64e838d53a 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -126,6 +126,106 @@ TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) { ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked); } +Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( + Node* proxy, Node* native_context) { + Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength)); + StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex); + InitializeFunctionContext(native_context, context, kProxyContextLength); + 
StoreContextElementNoWriteBarrier(context, kProxySlot, proxy); + return context; +} + +Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy, + Node* context) { + Node* const native_context = LoadNativeContext(context); + + Node* const proxy_context = + CreateProxyRevokeFunctionContext(proxy, native_context); + Node* const revoke_map = LoadContextElement( + native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); + Node* const revoke_info = + LoadContextElement(native_context, Context::PROXY_REVOKE_SHARED_FUN); + + return AllocateFunctionWithMapAndContext(revoke_map, revoke_info, + proxy_context); +} + +TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) { + Node* const target = Parameter(Descriptor::kTarget); + Node* const handler = Parameter(Descriptor::kHandler); + Node* const context = Parameter(Descriptor::kContext); + Node* const native_context = LoadNativeContext(context); + + Label throw_proxy_non_object(this, Label::kDeferred), + throw_proxy_handler_or_target_revoked(this, Label::kDeferred), + return_create_proxy(this); + + GotoIf(TaggedIsSmi(target), &throw_proxy_non_object); + GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object); + GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked); + + GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object); + GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object); + GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked); + + Node* const proxy = AllocateProxy(target, handler, context); + Node* const revoke = AllocateProxyRevokeFunction(proxy, context); + + Node* const result = Allocate(JSProxyRevocableResult::kSize); + Node* const result_map = LoadContextElement( + native_context, Context::PROXY_REVOCABLE_RESULT_MAP_INDEX); + StoreMapNoWriteBarrier(result, result_map); + StoreObjectFieldRoot(result, JSProxyRevocableResult::kPropertiesOrHashOffset, + Heap::kEmptyFixedArrayRootIndex); + StoreObjectFieldRoot(result, JSProxyRevocableResult::kElementsOffset, + Heap::kEmptyFixedArrayRootIndex); + StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kProxyOffset, + proxy); + StoreObjectFieldNoWriteBarrier(result, JSProxyRevocableResult::kRevokeOffset, + revoke); + Return(result); + + BIND(&throw_proxy_non_object); + ThrowTypeError(context, MessageTemplate::kProxyNonObject); + + BIND(&throw_proxy_handler_or_target_revoked); + ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked); +} + +// Proxy Revocation Functions +// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions +TF_BUILTIN(ProxyRevoke, ProxiesCodeStubAssembler) { + Node* const context = Parameter(Descriptor::kContext); + + // 1. Let p be F.[[RevocableProxy]]. + Node* const proxy_slot = IntPtrConstant(kProxySlot); + Node* const proxy = LoadContextElement(context, proxy_slot); + + Label revoke_called(this); + + // 2. If p is null, ... + GotoIf(IsNull(proxy), &revoke_called); + + // 3. Set F.[[RevocableProxy]] to null. + StoreContextElement(context, proxy_slot, NullConstant()); + + // 4. Assert: p is a Proxy object. + CSA_ASSERT(this, IsJSProxy(proxy)); + + // 5. Set p.[[ProxyTarget]] to null. + StoreObjectField(proxy, JSProxy::kTargetOffset, NullConstant()); + + // 6. Set p.[[ProxyHandler]] to null. + StoreObjectField(proxy, JSProxy::kHandlerOffset, NullConstant()); + + // 7. Return undefined. + Return(UndefinedConstant()); + + BIND(&revoke_called); + // 2. ... return undefined. 
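+ // Calling the revoke function again after the proxy has already been
+ // revoked is a no-op by spec step 2.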
+ Return(UndefinedConstant()); +} + TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) { Node* argc = Parameter(Descriptor::kActualArgumentsCount); Node* argc_ptr = ChangeInt32ToIntPtr(argc); diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h index 2b2ac54ebe..92b175bfde 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.h +++ b/deps/v8/src/builtins/builtins-proxy-gen.h @@ -27,17 +27,26 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { Node* receiver); protected: + enum ProxyRevokeFunctionContextSlot { + kProxySlot = Context::MIN_CONTEXT_SLOTS, + kProxyContextLength, + }; + void GotoIfRevokedProxy(Node* object, Label* if_proxy_revoked); Node* AllocateProxy(Node* target, Node* handler, Node* context); Node* AllocateJSArrayForCodeStubArguments(Node* context, CodeStubArguments& args, Node* argc, ParameterMode mode); + Node* AllocateProxyRevokeFunction(Node* proxy, Node* context); void CheckHasTrapResult(Node* context, Node* target, Node* proxy, Node* name, Label* check_passed, Label* if_bailout); void CheckGetSetTrapResult(Node* context, Node* target, Node* proxy, Node* name, Node* trap_result, Label* if_not_found, JSProxy::AccessKind access_kind); + + private: + Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 5ce4abd557..4227c628d1 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -62,15 +62,15 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length, LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX); StoreMapNoWriteBarrier(result, map); - Node* const empty_array = EmptyFixedArrayConstant(); - DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex)); StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset, - empty_array); + EmptyFixedArrayConstant()); StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, elements); StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length); StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index); - StoreObjectField(result, JSRegExpResult::kInputOffset, input); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kInputOffset, input); + StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset, + UndefinedConstant()); // Initialize the elements. @@ -223,8 +223,6 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( // Allocate a new object to store the named capture properties. // TODO(jgruber): Could be optimized by adding the object map to the heap // root list. - // TODO(jgruber): Replace CreateDataProperty runtime calls once we have - // equivalent functionality in CSA. Node* const native_context = LoadNativeContext(context); Node* const map = LoadContextElement( @@ -233,14 +231,7 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( AllocateNameDictionary(NameDictionary::kInitialCapacity); Node* const group_object = AllocateJSObjectFromMap(map, properties); - - // Store it on the result as a 'group' property. - - { - Node* const name = HeapConstant(isolate()->factory()->groups_string()); - CallRuntime(Runtime::kCreateDataProperty, context, result, name, - group_object); - } + StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object); // One or more named captures exist, add a property for each one. 
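The net effect of the regexp-result hunks above: the named-capture groups object is now attached to the result with a direct field store instead of a Runtime::kCreateDataProperty call, while the per-capture properties on the groups object itself are still created in the runtime, as the TODO in the next hunk notes. Both lines below are lifted from this diff.

// Before: 'groups' was defined on the result via a runtime call.
CallRuntime(Runtime::kCreateDataProperty, context, result, name, group_object);
// After: kGroupsOffset is an in-object field, pre-initialized to undefined
// in AllocateRegExpResult and overwritten with a plain field store.
StoreObjectField(result, JSRegExpResult::kGroupsOffset, group_object);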
@@ -267,6 +258,9 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
       Node* const capture =
           LoadFixedArrayElement(result_elements, SmiUntag(index));
 
+      // TODO(jgruber): Calling into runtime to create each property is slow.
+      // Either we should create properties entirely in CSA (should be doable),
+      // or only call runtime once and loop there.
       CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
                   capture);
 
@@ -834,7 +828,7 @@ Node* RegExpBuiltinsAssembler::IsFastRegExpNoPrototype(Node* const context,
   Label out(this);
   VARIABLE(var_result, MachineRepresentation::kWord32);
 
-#if defined(DEBUG) || defined(ENABLE_FASTSLOW_SWITCH)
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
   var_result.Bind(Int32Constant(0));
   GotoIfForceSlowPath(&out);
 #endif
@@ -1225,8 +1219,7 @@ TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
   Node* const receiver = maybe_receiver;
 
   Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
-  Branch(IsFastRegExpNoPrototype(context, receiver, map), &if_isfastpath,
-         &if_isslowpath);
+  BranchIfFastRegExp(context, receiver, map, &if_isfastpath, &if_isslowpath);
 
   BIND(&if_isfastpath);
   Return(FlagsGetter(context, receiver, true));
@@ -2543,7 +2536,7 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
   // to verify the constructor property and jump to the slow path if it has
   // been changed.
 
-  // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+  // Verify {maybe_limit}.
   VARIABLE(var_limit, MachineRepresentation::kTagged, maybe_limit);
   Label if_limitissmimax(this), runtime(this, Label::kDeferred);
 
@@ -2552,21 +2545,12 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
     Label next(this);
 
     GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
-    GotoIf(TaggedIsPositiveSmi(maybe_limit), &next);
-
-    var_limit.Bind(ToUint32(context, maybe_limit));
-    {
-      // ToUint32(limit) could potentially change the shape of the RegExp
-      // object. Recheck that we are still on the fast path and bail to runtime
-      // otherwise.
-      {
-        Label next(this);
-        BranchIfFastRegExp(context, regexp, &next, &runtime);
-        BIND(&next);
-      }
+    Branch(TaggedIsPositiveSmi(maybe_limit), &next, &runtime);
 
-      Branch(TaggedIsPositiveSmi(var_limit.value()), &next, &if_limitissmimax);
-    }
+    // We need to be extra-strict and require the given limit to be either
+    // undefined or a positive smi. We can't call ToUint32(maybe_limit) since
+    // that might move us onto the slow path, resulting in ordering spec
+    // violations (see https://crbug.com/801171).
 
     BIND(&if_limitissmimax);
     {
@@ -2590,13 +2574,8 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
   RegExpPrototypeSplitBody(context, regexp, string, var_limit.value());
 
   BIND(&runtime);
-  {
-    // The runtime call passes in limit to ensure the second ToUint32(limit)
-    // call is not observable.
-    CSA_ASSERT(this, IsNumber(var_limit.value()));
-    Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
-                       var_limit.value()));
-  }
+  Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string,
+                     var_limit.value()));
 }
 
 // ES#sec-regexp.prototype-@@split
@@ -2740,7 +2719,7 @@ Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
       TNode<IntPtrT> int_elem = SmiUntag(elem);
       TNode<IntPtrT> new_match_start =
           Signed(IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
-                           WordAnd(int_elem, IntPtrConstant(0x7ff))));
+                           WordAnd(int_elem, IntPtrConstant(0x7FF))));
       var_match_start = SmiTag(new_match_start);
       Goto(&loop_epilogue);
     }
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 6122ff85da..278a48c68e 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -213,7 +213,7 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   ValidateAtomicIndex(array, index_word32, context);
   Node* index_word = ChangeUint32ToWord(index_word32);
 
-  Node* value_integer = ToInteger(context, value);
+  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
   Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
 
 #if DEBUG
@@ -266,7 +266,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   ValidateAtomicIndex(array, index_word32, context);
 
-  Node* value_integer = ToInteger(context, value);
+  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
 
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -340,8 +340,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   ValidateAtomicIndex(array, index_word32, context);
 
-  Node* old_value_integer = ToInteger(context, old_value);
-  Node* new_value_integer = ToInteger(context, new_value);
+  Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
+  Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
 
 #if DEBUG
   DebugSanityCheckAtomicIndex(array, index_word32, context);
@@ -436,7 +436,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
   ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
   ValidateAtomicIndex(array, index_word32, context);
 
-  Node* value_integer = ToInteger(context, value);
+  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
 
 #if DEBUG
   // In Debug mode, we re-validate the index as a sanity check because
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 9d86f3105b..195572de8e 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -126,8 +126,8 @@ Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
 void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
     Node* context, Variable* var_start, Node* start, Node* string_length) {
-  TNode<Object> const start_int =
-      ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+  TNode<Object> const start_int = ToInteger_Inline(
+      CAST(context), CAST(start), CodeStubAssembler::kTruncateMinusZero);
   TNode<Smi> const zero = SmiConstant(0);
 
   Label done(this);
@@ -319,6 +319,31 @@ void StringBuiltinsAssembler::StringEqual_Loop(
   }
 }
 
+void StringBuiltinsAssembler::GenerateStringAt(char const* method_name,
+                                               TNode<Context> context,
+                                               Node* receiver,
+                                               TNode<Object> maybe_position,
+                                               TNode<Object> default_return,
+                                               StringAtAccessor accessor) {
+  // Check that {receiver} is coercible to Object and convert it to a String.
+  TNode<String> string = ToThisString(context, receiver, method_name);
+
+  // Convert the {position} to a Smi and check that it's in bounds of the
+  // {string}.
+  Label if_outofbounds(this, Label::kDeferred);
+  TNode<Number> position = ToInteger_Inline(
+      context, maybe_position, CodeStubAssembler::kTruncateMinusZero);
+  GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
+  TNode<IntPtrT> index = SmiUntag(CAST(position));
+  TNode<IntPtrT> length = LoadStringLengthAsWord(string);
+  GotoIfNot(UintPtrLessThan(index, length), &if_outofbounds);
+  TNode<Object> result = accessor(string, length, index);
+  Return(result);
+
+  BIND(&if_outofbounds);
+  Return(default_return);
+}
+
 void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
                                                                  Node* left,
                                                                  Node* right,
@@ -526,28 +551,43 @@ TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
                                        Operation::kGreaterThanOrEqual);
 }
 
-TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
   Node* receiver = Parameter(Descriptor::kReceiver);
   Node* position = Parameter(Descriptor::kPosition);
 
   // Load the character code at the {position} from the {receiver}.
-  Node* code = StringCharCodeAt(receiver, position);
+  TNode<Int32T> code = StringCharCodeAt(receiver, position);
 
   // And return the single character string with only that {code}
-  Node* result = StringFromCharCode(code);
+  TNode<String> result = StringFromCharCode(code);
   Return(result);
 }
 
-TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringCharCodeAt, StringBuiltinsAssembler) {
   Node* receiver = Parameter(Descriptor::kReceiver);
   Node* position = Parameter(Descriptor::kPosition);
 
   // Load the character code at the {position} from the {receiver}.
-  Node* code = StringCharCodeAt(receiver, position);
+  TNode<Int32T> code = StringCharCodeAt(receiver, position);
+
+  // And return it as TaggedSigned value.
+  // TODO(turbofan): Allow builtins to return values untagged.
+  TNode<Smi> result = SmiFromWord32(code);
+  Return(result);
+}
+
+TF_BUILTIN(StringCodePointAt, StringBuiltinsAssembler) {
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* position = Parameter(Descriptor::kPosition);
+  // TODO(sigurds) Figure out if passing length as argument pays off.
+  TNode<IntPtrT> length = LoadStringLengthAsWord(receiver);
+  // Load the character code at the {position} from the {receiver}.
+  TNode<Int32T> code =
+      LoadSurrogatePairAt(receiver, length, position, UnicodeEncoding::UTF32);
 
   // And return it as TaggedSigned value.
   // TODO(turbofan): Allow builtins to return values untagged.
-  Node* result = SmiFromWord32(code);
+  TNode<Smi> result = SmiFromWord32(code);
   Return(result);
 }
 
@@ -563,7 +603,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
   Node* context = Parameter(BuiltinDescriptor::kContext);
 
   CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
-  TNode<Smi> smi_argc = SmiTag(arguments.GetLength());
+  TNode<Smi> smi_argc = SmiTag(arguments.GetLength(INTPTR_PARAMETERS));
   // Check if we have exactly one argument (plus the implicit receiver), i.e.
   // if the parent frame is not an arguments adaptor frame.
   Label if_oneargument(this), if_notoneargument(this);
@@ -577,7 +617,8 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
     // string on the fly otherwise.
     Node* code = arguments.AtIndex(0);
     Node* code32 = TruncateTaggedToWord32(context, code);
-    Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+    TNode<Int32T> code16 =
+        Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
 
     Node* result = StringFromCharCode(code16);
     arguments.PopAndReturn(result);
   }
@@ -662,115 +703,49 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
 }
 
 // ES6 #sec-string.prototype.charat
-TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* position = Parameter(Descriptor::kPosition);
-  Node* context = Parameter(Descriptor::kContext);
-
-  // Check that {receiver} is coercible to Object and convert it to a String.
-  receiver = ToThisString(context, receiver, "String.prototype.charAt");
-
-  // Convert the {position} to a Smi and check that it's in bounds of the
-  // {receiver}.
-  {
-    Label return_emptystring(this, Label::kDeferred);
-    position =
-        ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
-    GotoIfNot(TaggedIsSmi(position), &return_emptystring);
-
-    // Determine the actual length of the {receiver} String.
-    TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
-    // Return "" if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(this);
-    Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
-           &if_positioninbounds);
-
-    BIND(&return_emptystring);
-    Return(EmptyStringConstant());
-
-    BIND(&if_positioninbounds);
-  }
-
-  // Load the character code at the {position} from the {receiver}.
-  CSA_ASSERT(this, IntPtrLessThan(SmiUntag(position),
-                                  LoadStringLengthAsWord(receiver)));
-  CSA_ASSERT(this,
-             IntPtrGreaterThanOrEqual(SmiUntag(position), IntPtrConstant(0)));
-  Node* code = StringCharCodeAt(receiver, SmiUntag(position));
+  TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
 
-  // And return the single character string with only that {code}.
-  Node* result = StringFromCharCode(code);
-  Return(result);
+  GenerateStringAt("String.prototype.charAt", context, receiver, maybe_position,
+                   EmptyStringConstant(),
+                   [this](TNode<String> string, TNode<IntPtrT> length,
+                          TNode<IntPtrT> index) {
+                     TNode<Int32T> code = StringCharCodeAt(string, index);
+                     return StringFromCharCode(code);
+                   });
 }
 
 // ES6 #sec-string.prototype.charcodeat
-TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* position = Parameter(Descriptor::kPosition);
-  Node* context = Parameter(Descriptor::kContext);
-
-  // Check that {receiver} is coercible to Object and convert it to a String.
-  receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
-
-  // Convert the {position} to a Smi and check that it's in bounds of the
-  // {receiver}.
-  {
-    Label return_nan(this, Label::kDeferred);
-    position =
-        ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
-    GotoIfNot(TaggedIsSmi(position), &return_nan);
-
-    // Determine the actual length of the {receiver} String.
-    TNode<Smi> receiver_length = LoadStringLengthAsSmi(receiver);
-
-    // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(this);
-    Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
-           &if_positioninbounds);
+  TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
 
-    BIND(&return_nan);
-    Return(NaNConstant());
-
-    BIND(&if_positioninbounds);
-  }
-
-  // Load the character at the {position} from the {receiver}.
-  Node* value = StringCharCodeAt(receiver, SmiUntag(position));
-  Node* result = SmiFromWord32(value);
-  Return(result);
+  GenerateStringAt("String.prototype.charCodeAt", context, receiver,
+                   maybe_position, NanConstant(),
+                   [this](TNode<String> receiver, TNode<IntPtrT> length,
+                          TNode<IntPtrT> index) {
+                     Node* value = StringCharCodeAt(receiver, index);
+                     return SmiFromWord32(value);
+                   });
 }
 
 // ES6 #sec-string.prototype.codepointat
 TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* receiver = Parameter(Descriptor::kReceiver);
-  Node* position = Parameter(Descriptor::kPosition);
-
-  // Check that {receiver} is coercible to Object and convert it to a String.
-  receiver = ToThisString(context, receiver, "String.prototype.codePointAt");
-
-  // Convert the {position} to a Smi and check that it's in bounds of the
-  // {receiver}.
-  Label if_inbounds(this), if_outofbounds(this, Label::kDeferred);
-  position =
-      ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
-  GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
-  TNode<IntPtrT> untagged_position = SmiUntag(position);
-  TNode<IntPtrT> receiver_length = LoadStringLengthAsWord(receiver);
-  Branch(UintPtrLessThan(untagged_position, receiver_length), &if_inbounds,
-         &if_outofbounds);
-
-  BIND(&if_inbounds);
-  {
-    Node* value = LoadSurrogatePairAt(
-        receiver, receiver_length, untagged_position, UnicodeEncoding::UTF32);
-    Node* result = SmiFromWord32(value);
-    Return(result);
-  }
+  TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
 
-  BIND(&if_outofbounds);
-  Return(UndefinedConstant());
+  GenerateStringAt("String.prototype.codePointAt", context, receiver,
+                   maybe_position, UndefinedConstant(),
+                   [this](TNode<String> receiver, TNode<IntPtrT> length,
+                          TNode<IntPtrT> index) {
+                     Node* value = LoadSurrogatePairAt(receiver, length, index,
+                                                       UnicodeEncoding::UTF32);
+                     return SmiFromWord32(value);
+                   });
 }
 
 // ES6 String.prototype.concat(...args)
@@ -999,7 +974,7 @@ void StringIncludesIndexOfAssembler::Generate(SearchVariant variant) {
   CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
   Node* const receiver = arguments.GetReceiver();
   // From now on use word-size argc value.
-  argc = arguments.GetLength();
+  argc = arguments.GetLength(INTPTR_PARAMETERS);
 
   VARIABLE(var_search_string, MachineRepresentation::kTagged);
   VARIABLE(var_position, MachineRepresentation::kTagged);
@@ -1217,16 +1192,17 @@ TF_BUILTIN(StringPrototypeRepeat, StringBuiltinsAssembler) {
   Label invalid_count(this), invalid_string_length(this),
       return_emptystring(this);
 
-  Node* const context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
   Node* const receiver = Parameter(Descriptor::kReceiver);
-  Node* const count = Parameter(Descriptor::kCount);
+  TNode<Object> count = CAST(Parameter(Descriptor::kCount));
   Node* const string =
       ToThisString(context, receiver, "String.prototype.repeat");
   Node* const is_stringempty =
       SmiEqual(LoadStringLengthAsSmi(string), SmiConstant(0));
 
-  VARIABLE(var_count, MachineRepresentation::kTagged,
-           ToInteger(context, count, CodeStubAssembler::kTruncateMinusZero));
+  VARIABLE(
+      var_count, MachineRepresentation::kTagged,
+      ToInteger_Inline(context, count, CodeStubAssembler::kTruncateMinusZero));
 
   // Verifies a valid count and takes a fast path when the result will be an
   // empty string.
@@ -1713,8 +1689,8 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
   CodeStubArguments args(this, argc);
   Node* const receiver = args.GetReceiver();
   Node* const start = args.GetOptionalArgumentValue(kStart);
-  Node* const end = args.GetOptionalArgumentValue(kEnd);
-  Node* const context = Parameter(BuiltinDescriptor::kContext);
+  TNode<Object> end = CAST(args.GetOptionalArgumentValue(kEnd));
+  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
 
   TNode<Smi> const smi_zero = SmiConstant(0);
 
@@ -1737,7 +1713,7 @@ TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
   //    else let intEnd be ? ToInteger(end).
   Node* const end_int =
-      ToInteger(context, end, CodeStubAssembler::kTruncateMinusZero);
+      ToInteger_Inline(context, end, CodeStubAssembler::kTruncateMinusZero);
 
   // 7. If intEnd < 0, let to be max(len + intEnd, 0);
   //    otherwise let to be min(intEnd, len).
@@ -1893,8 +1869,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
   Node* const receiver = args.GetReceiver();
   Node* const start = args.GetOptionalArgumentValue(kStartArg);
-  Node* const length = args.GetOptionalArgumentValue(kLengthArg);
-  Node* const context = Parameter(BuiltinDescriptor::kContext);
+  TNode<Object> length = CAST(args.GetOptionalArgumentValue(kLengthArg));
+  TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
 
   Label out(this);
 
@@ -1925,8 +1901,8 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
     Goto(&if_issmi);
 
     BIND(&if_isnotundefined);
-    var_length =
-        ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+    var_length = ToInteger_Inline(context, length,
+                                  CodeStubAssembler::kTruncateMinusZero);
   }
 
   TVARIABLE(Smi, var_result_length);
@@ -1984,7 +1960,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
   TVARIABLE(Smi, var_result);
 
   TNode<Object> const value_int =
-      this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+      ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
 
   Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
   Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
@@ -2296,14 +2272,14 @@ TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
 
 // Return the |word32| codepoint at {index}. Supports SeqStrings and
 // ExternalStrings.
-TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
+TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
     SloppyTNode<String> string, SloppyTNode<IntPtrT> length,
     SloppyTNode<IntPtrT> index, UnicodeEncoding encoding) {
   Label handle_surrogate_pair(this), return_result(this);
-  TVARIABLE(Uint32T, var_result);
-  TVARIABLE(Uint32T, var_trail);
+  TVARIABLE(Int32T, var_result);
+  TVARIABLE(Int32T, var_trail);
   var_result = StringCharCodeAt(string, index);
-  var_trail = Unsigned(Int32Constant(0));
+  var_trail = Int32Constant(0);
 
   GotoIf(Word32NotEqual(Word32And(var_result, Int32Constant(0xFC00)),
                         Int32Constant(0xD800)),
@@ -2318,8 +2294,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
   BIND(&handle_surrogate_pair);
   {
-    TNode<Uint32T> lead = var_result;
-    TNode<Uint32T> trail = var_trail;
+    TNode<Int32T> lead = var_result;
+    TNode<Int32T> trail = var_trail;
 
     // Check that this path is only taken if a surrogate pair is found
     CSA_SLOW_ASSERT(this,
@@ -2331,7 +2307,7 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
     switch (encoding) {
       case UnicodeEncoding::UTF16:
-        var_result = Unsigned(Word32Or(
+        var_result = Signed(Word32Or(
 // Need to swap the order for big-endian platforms
 #if V8_TARGET_BIG_ENDIAN
             Word32Shl(lead, Int32Constant(16)), trail));
@@ -2347,8 +2323,8 @@ TNode<Uint32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
             Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
 
         // (lead << 10) + trail + SURROGATE_OFFSET
-        var_result = Unsigned(Int32Add(Word32Shl(lead, Int32Constant(10)),
-                                       Int32Add(trail, surrogate_offset)));
+        var_result = Signed(Int32Add(Word32Shl(lead, Int32Constant(10)),
+                                     Int32Add(trail, surrogate_offset)));
         break;
       }
     }
@@ -2387,8 +2363,8 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
   BIND(&next_codepoint);
   {
     UnicodeEncoding encoding = UnicodeEncoding::UTF16;
-    Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
-    Node* value = StringFromCodePoint(ch, encoding);
+    TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
+    TNode<String> value = StringFromCodePoint(ch, encoding);
     var_value.Bind(value);
     TNode<IntPtrT> length = LoadStringLengthAsWord(value);
     StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index f1111b3465..1bd5429fdb 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -57,10 +57,19 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
                           SloppyTNode<Object> value, SloppyTNode<Smi> limit);
 
-  TNode<Uint32T> LoadSurrogatePairAt(SloppyTNode<String> string,
-                                     SloppyTNode<IntPtrT> length,
-                                     SloppyTNode<IntPtrT> index,
-                                     UnicodeEncoding encoding);
+  typedef std::function<TNode<Object>(
+      TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>
+      StringAtAccessor;
+
+  void GenerateStringAt(const char* method_name, TNode<Context> context,
+                        Node* receiver, TNode<Object> maybe_position,
+                        TNode<Object> default_return,
+                        StringAtAccessor accessor);
+
+  TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
+                                    SloppyTNode<IntPtrT> length,
+                                    SloppyTNode<IntPtrT> index,
+                                    UnicodeEncoding encoding);
 
   void StringIndexOf(Node* const subject_string, Node* const search_string,
                      Node* const position, std::function<void(Node*)> f_return);
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 14a74afb6d..d2e447538d 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -324,8 +324,8 @@ namespace {
 inline bool ToUpperOverflows(uc32 character) {
   // y with umlauts and the micro sign are the only characters that stop
   // fitting into one-byte when converting to uppercase.
-  static const uc32 yuml_code = 0xff;
-  static const uc32 micro_code = 0xb5;
+  static const uc32 yuml_code = 0xFF;
+  static const uc32 micro_code = 0xB5;
   return (character == yuml_code || character == micro_code);
 }
 
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index df89d1ced3..b830a8597d 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -36,15 +36,17 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
                                     const char* method_name,
                                     IterationKind iteration_kind);
 
-  void SetupTypedArray(Node* holder, Node* length, Node* byte_offset,
-                       Node* byte_length);
-  void AttachBuffer(Node* holder, Node* buffer, Node* map, Node* length,
-                    Node* byte_offset);
-
-  Node* LoadMapForType(Node* array);
-  Node* CalculateExternalPointer(Node* backing_store, Node* byte_offset);
+  void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+                       TNode<Number> byte_offset, TNode<Number> byte_length);
+  void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
+                    TNode<Map> map, TNode<Smi> length,
+                    TNode<Number> byte_offset);
+
+  TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
+  TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
+                                           TNode<Number> byte_offset);
   Node* LoadDataPtr(Node* typed_array);
-  Node* ByteLengthIsValid(Node* byte_length);
+  TNode<BoolT> ByteLengthIsValid(TNode<Number> byte_length);
 
   // Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
   TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
@@ -78,9 +80,8 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
                          TNode<IntPtrT> offset);
 };
 
-Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
-  CSA_ASSERT(this, IsJSTypedArray(array));
-
+TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
+    TNode<JSTypedArray> array) {
   Label unreachable(this), done(this);
   Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
       uint16_elements(this), int16_elements(this), uint32_elements(this),
@@ -99,10 +100,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
   DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
   DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
 
-  VARIABLE(var_typed_map, MachineRepresentation::kTagged);
+  TVARIABLE(Map, var_typed_map);
 
-  Node* array_map = LoadMap(array);
-  Node* elements_kind = LoadMapElementsKind(array_map);
+  TNode<Map> array_map = LoadMap(array);
+  TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
   Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
          kTypedElementsKindCount);
 
@@ -113,7 +114,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
       ExternalArrayType type =
          isolate()->factory()->GetArrayTypeFromElementsKind(kind);
       Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
-      var_typed_map.Bind(HeapConstant(map));
+      var_typed_map = HeapConstant(map);
       Goto(&done);
     }
   }
 
@@ -121,7 +122,7 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
   BIND(&unreachable);
   { Unreachable(); }
   BIND(&done);
-  return var_typed_map.value();
+  return var_typed_map;
 }
 
 // The byte_offset can be higher than Smi range, in which case to perform the
@@ -131,10 +132,10 @@ Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
 // can't allocate an array bigger than our 32-bit arithmetic range anyway. 64
 // bit platforms could theoretically have an offset up to 2^35 - 1, so we may
 // need to convert the float heap number to an intptr.
-Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
-                                                            Node* byte_offset) {
-  return IntPtrAdd(backing_store,
-                   ChangeNonnegativeNumberToUintPtr(byte_offset));
+TNode<UintPtrT> TypedArrayBuiltinsAssembler::CalculateExternalPointer(
+    TNode<UintPtrT> backing_store, TNode<Number> byte_offset) {
+  return Unsigned(
+      IntPtrAdd(backing_store, ChangeNonnegativeNumberToUintPtr(byte_offset)));
 }
 
 // Setup the TypedArray which is under construction.
@@ -142,14 +143,10 @@ Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(Node* backing_store,
 //  - Set the byte_offset.
 //  - Set the byte_length.
 //  - Set EmbedderFields to 0.
-void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
-                                                  Node* byte_offset,
-                                                  Node* byte_length) {
-  CSA_ASSERT(this, IsJSTypedArray(holder));
-  CSA_ASSERT(this, TaggedIsSmi(length));
-  CSA_ASSERT(this, IsNumber(byte_offset));
-  CSA_ASSERT(this, IsNumber(byte_length));
-
+void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
+                                                  TNode<Smi> length,
+                                                  TNode<Number> byte_offset,
+                                                  TNode<Number> byte_length) {
   StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
   StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
   StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
@@ -160,15 +157,11 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
 }
 
 // Attach an off-heap buffer to a TypedArray.
-void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
-                                               Node* map, Node* length,
-                                               Node* byte_offset) {
-  CSA_ASSERT(this, IsJSTypedArray(holder));
-  CSA_ASSERT(this, IsJSArrayBuffer(buffer));
-  CSA_ASSERT(this, IsMap(map));
-  CSA_ASSERT(this, TaggedIsSmi(length));
-  CSA_ASSERT(this, IsNumber(byte_offset));
-
+void TypedArrayBuiltinsAssembler::AttachBuffer(TNode<JSTypedArray> holder,
+                                               TNode<JSArrayBuffer> buffer,
+                                               TNode<Map> map,
+                                               TNode<Smi> length,
+                                               TNode<Number> byte_offset) {
   StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
 
   Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
@@ -177,10 +170,11 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(
   StoreObjectFieldNoWriteBarrier(
       elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
 
-  Node* backing_store = LoadObjectField(
-      buffer, JSArrayBuffer::kBackingStoreOffset, MachineType::Pointer());
+  TNode<UintPtrT> backing_store =
+      LoadObjectField<UintPtrT>(buffer, JSArrayBuffer::kBackingStoreOffset);
 
-  Node* external_pointer = CalculateExternalPointer(backing_store, byte_offset);
+  TNode<UintPtrT> external_pointer =
+      CalculateExternalPointer(backing_store, byte_offset);
   StoreObjectFieldNoWriteBarrier(
       elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
       MachineType::PointerRepresentation());
@@ -189,23 +183,16 @@ void TypedArrayBuiltinsAssembler::AttachBuffer(
 }
 
 TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
-  Node* holder = Parameter(Descriptor::kHolder);
-  Node* length = Parameter(Descriptor::kLength);
-  Node* buffer = Parameter(Descriptor::kBuffer);
-  Node* element_size = Parameter(Descriptor::kElementSize);
-  Node* byte_offset = Parameter(Descriptor::kByteOffset);
-
-  CSA_ASSERT(this, IsJSTypedArray(holder));
-  CSA_ASSERT(this, TaggedIsSmi(length));
-  CSA_ASSERT(this, IsJSArrayBuffer(buffer));
-  CSA_ASSERT(this, TaggedIsSmi(element_size));
-  CSA_ASSERT(this, IsNumber(byte_offset));
+  TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+  TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+  TNode<JSArrayBuffer> buffer = CAST(Parameter(Descriptor::kBuffer));
+  TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
+  TNode<Number> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
 
-  Node* fixed_typed_map = LoadMapForType(holder);
+  TNode<Map> fixed_typed_map = LoadMapForType(holder);
 
   // SmiMul returns a heap number in case of Smi overflow.
-  Node* byte_length = SmiMul(length, element_size);
-  CSA_ASSERT(this, IsNumber(byte_length));
+  TNode<Number> byte_length = SmiMul(length, element_size);
 
   SetupTypedArray(holder, length, byte_offset, byte_length);
   AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
@@ -213,18 +200,17 @@ TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
 }
 
 TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
-  Node* holder = Parameter(Descriptor::kHolder);
-  Node* length = Parameter(Descriptor::kLength);
-  Node* element_size = Parameter(Descriptor::kElementSize);
+  TNode<JSTypedArray> holder = CAST(Parameter(Descriptor::kHolder));
+  TNode<Smi> length = CAST(Parameter(Descriptor::kLength));
+  TNode<Smi> element_size = CAST(Parameter(Descriptor::kElementSize));
   Node* initialize = Parameter(Descriptor::kInitialize);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
-  CSA_ASSERT(this, IsJSTypedArray(holder));
   CSA_ASSERT(this, TaggedIsPositiveSmi(length));
   CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
   CSA_ASSERT(this, IsBoolean(initialize));
 
-  Node* byte_offset = SmiConstant(0);
+  TNode<Smi> byte_offset = SmiConstant(0);
 
   static const int32_t fta_base_data_offset =
       FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
@@ -235,16 +221,16 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
   VARIABLE(var_total_size, MachineType::PointerRepresentation());
 
   // SmiMul returns a heap number in case of Smi overflow.
-  Node* byte_length = SmiMul(length, element_size);
-  CSA_ASSERT(this, IsNumber(byte_length));
+  TNode<Number> byte_length = SmiMul(length, element_size);
 
   SetupTypedArray(holder, length, byte_offset, byte_length);
 
-  Node* fixed_typed_map = LoadMapForType(holder);
+  TNode<Map> fixed_typed_map = LoadMapForType(holder);
   GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
   GotoIf(
       SmiGreaterThan(byte_length, SmiConstant(V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP)),
       &allocate_off_heap);
+  TNode<IntPtrT> word_byte_length = SmiToWord(CAST(byte_length));
   Goto(&allocate_on_heap);
 
   BIND(&allocate_on_heap);
@@ -297,7 +283,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
     DCHECK_EQ(0, FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask);
     Node* aligned_header_size =
         IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
-    Node* size = IntPtrAdd(SmiToWord(byte_length), aligned_header_size);
+    Node* size = IntPtrAdd(word_byte_length, aligned_header_size);
     var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
     Goto(&allocate_elements);
   }
@@ -305,7 +291,7 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
   BIND(&aligned);
   {
     Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
-    var_total_size.Bind(IntPtrAdd(SmiToWord(byte_length), header_size));
+    var_total_size.Bind(IntPtrAdd(word_byte_length, header_size));
     Goto(&allocate_elements);
   }
 
@@ -344,11 +330,11 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
         ExternalConstant(ExternalReference::libc_memset_function(isolate()));
     CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
                    MachineType::IntPtr(), MachineType::UintPtr(), memset,
-                   backing_store, IntPtrConstant(0), SmiToWord(byte_length));
+                   backing_store, IntPtrConstant(0), word_byte_length);
     Goto(&done);
   }
 
-  VARIABLE(var_buffer, MachineRepresentation::kTagged);
+  TVARIABLE(JSArrayBuffer, var_buffer);
 
   BIND(&allocate_off_heap);
   {
@@ -356,8 +342,8 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
     Node* buffer_constructor = LoadContextElement(
         LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX);
-    var_buffer.Bind(ConstructJS(CodeFactory::Construct(isolate()), context,
-                                buffer_constructor, byte_length));
+    var_buffer = CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+                                  buffer_constructor, byte_length));
     Goto(&attach_buffer);
   }
 
@@ -365,16 +351,15 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
   {
     Node* buffer_constructor_noinit = LoadContextElement(
         LoadNativeContext(context), Context::ARRAY_BUFFER_NOINIT_FUN_INDEX);
-    var_buffer.Bind(CallJS(CodeFactory::Call(isolate()), context,
-                           buffer_constructor_noinit, UndefinedConstant(),
-                           byte_length));
+    var_buffer = CAST(CallJS(CodeFactory::Call(isolate()), context,
+                             buffer_constructor_noinit, UndefinedConstant(),
+                             byte_length));
     Goto(&attach_buffer);
  }
 
   BIND(&attach_buffer);
   {
-    AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
-                 byte_offset);
+    AttachBuffer(holder, var_buffer, fixed_typed_map, length, byte_offset);
     Goto(&done);
   }
 
@@ -385,18 +370,18 @@ TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
 
 // ES6 #sec-typedarray-length
 TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
   Node* holder = Parameter(Descriptor::kHolder);
-  Node* length = Parameter(Descriptor::kLength);
-  Node* element_size = Parameter(Descriptor::kElementSize);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Object> maybe_length = CAST(Parameter(Descriptor::kLength));
+  TNode<Object> element_size = CAST(Parameter(Descriptor::kElementSize));
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   CSA_ASSERT(this, IsJSTypedArray(holder));
   CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
 
-  Node* initialize = TrueConstant();
   Label invalid_length(this);
 
-  length = ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+  TNode<Number> length = ToInteger_Inline(
+      context, maybe_length, CodeStubAssembler::kTruncateMinusZero);
+
   // The maximum length of a TypedArray is MaxSmi().
   // Note: this is not per spec, but rather a constraint of our current
   // representation (which uses smi's).
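
[Editorial aside — not part of the commit] TypedArrayConstructByLength above truncates the requested length with ToInteger_Inline (dropping -0) and then, per the following hunk, rejects anything negative or outside Smi range. A rough sketch of the observable behavior (TypeScript; the exact upper bound is an engine constraint, as the comment above notes):

    console.log(new Uint8Array(3.7).length); // 3: fractional lengths truncate
    console.log(new Uint8Array(-0).length);  // 0: -0 is normalized to +0
    try {
      new Uint8Array(-1); // still negative after truncation
    } catch (e) {
      console.log(e instanceof RangeError); // true
    }
    try {
      new Uint8Array(2 ** 40); // exceeds the Smi-bounded maximum here
    } catch (e) {
      console.log(e instanceof RangeError); // true
    }
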
@@ -404,7 +389,7 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
   GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
 
   CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
-              element_size, initialize);
+              element_size, TrueConstant());
   Return(UndefinedConstant());
 
   BIND(&invalid_length);
@@ -419,10 +404,10 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
 TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
   Node* holder = Parameter(Descriptor::kHolder);
   Node* buffer = Parameter(Descriptor::kBuffer);
-  Node* byte_offset = Parameter(Descriptor::kByteOffset);
+  TNode<Object> byte_offset = CAST(Parameter(Descriptor::kByteOffset));
   Node* length = Parameter(Descriptor::kLength);
   Node* element_size = Parameter(Descriptor::kElementSize);
-  Node* context = Parameter(Descriptor::kContext);
+  TNode<Context> context = CAST(Parameter(Descriptor::kContext));
 
   CSA_ASSERT(this, IsJSTypedArray(holder));
   CSA_ASSERT(this, IsJSArrayBuffer(buffer));
@@ -440,8 +425,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
 
   GotoIf(IsUndefined(byte_offset), &check_length);
 
-  offset.Bind(
-      ToInteger(context, byte_offset, CodeStubAssembler::kTruncateMinusZero));
+  offset.Bind(ToInteger_Inline(context, byte_offset,
+                               CodeStubAssembler::kTruncateMinusZero));
   Branch(TaggedIsSmi(offset.value()), &offset_is_smi, &offset_not_smi);
 
   // Check that the offset is a multiple of the element size.
@@ -569,25 +554,27 @@ Node* TypedArrayBuiltinsAssembler::LoadDataPtr(Node* typed_array) {
   return IntPtrAdd(base_pointer, external_pointer);
 }
 
-Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(Node* byte_length) {
+TNode<BoolT> TypedArrayBuiltinsAssembler::ByteLengthIsValid(
+    TNode<Number> byte_length) {
   Label smi(this), done(this);
-  VARIABLE(is_valid, MachineRepresentation::kWord32);
+  TVARIABLE(BoolT, is_valid);
   GotoIf(TaggedIsSmi(byte_length), &smi);
 
-  CSA_ASSERT(this, IsHeapNumber(byte_length));
-  Node* float_value = LoadHeapNumberValue(byte_length);
-  Node* max_byte_length_double =
+  TNode<Float64T> float_value = LoadHeapNumberValue(CAST(byte_length));
+  TNode<Float64T> max_byte_length_double =
       Float64Constant(FixedTypedArrayBase::kMaxByteLength);
-  is_valid.Bind(Float64LessThanOrEqual(float_value, max_byte_length_double));
+  is_valid = Float64LessThanOrEqual(float_value, max_byte_length_double);
   Goto(&done);
 
   BIND(&smi);
-  Node* max_byte_length = IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
-  is_valid.Bind(UintPtrLessThanOrEqual(SmiUntag(byte_length), max_byte_length));
+  TNode<IntPtrT> max_byte_length =
+      IntPtrConstant(FixedTypedArrayBase::kMaxByteLength);
+  is_valid =
+      UintPtrLessThanOrEqual(SmiUntag(CAST(byte_length)), max_byte_length);
   Goto(&done);
 
   BIND(&done);
-  return is_valid.value();
+  return is_valid;
 }
 
 TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
@@ -611,8 +598,8 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
   Return(UndefinedConstant());
 
   BIND(&fill);
-  Node* holder_kind = LoadMapElementsKind(LoadMap(holder));
-  Node* source_kind = LoadMapElementsKind(LoadMap(array_like));
+  TNode<Int32T> holder_kind = LoadMapElementsKind(LoadMap(holder));
+  TNode<Int32T> source_kind = LoadMapElementsKind(LoadMap(array_like));
   GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
 
   // Copy using the elements accessor.
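
[Editorial aside — not part of the commit] TypedArrayConstructByArrayLike above takes the bytewise fast path only when source and target elements kinds match; otherwise it copies per element with the target's numeric coercion. A sketch of those semantics (TypeScript):

    // Element-wise construction coerces each value (ToUint8 here: ToNumber,
    // truncate toward zero, then wrap modulo 256).
    const arrayLike = { length: 3, 0: 1, 1: 2.9, 2: 300 };
    const u8 = new Uint8Array(arrayLike);
    console.log(Array.from(u8)); // [1, 2, 44]
    // A same-kind source (Uint8Array -> Uint8Array) can be copied bytewise.
    const copy = new Uint8Array(u8);
    console.log(Array.from(copy)); // [1, 2, 44]
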
@@ -632,9 +619,10 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
                            array_like, JSTypedArray::kBufferOffset)),
                        Int32Constant(0)));
 
-  Node* byte_length = SmiMul(length, element_size);
+  TNode<Number> byte_length = SmiMul(length, element_size);
   CSA_ASSERT(this, ByteLengthIsValid(byte_length));
-  Node* byte_length_intptr = ChangeNonnegativeNumberToUintPtr(byte_length);
+  TNode<UintPtrT> byte_length_intptr =
+      ChangeNonnegativeNumberToUintPtr(byte_length);
   CSA_ASSERT(this, UintPtrLessThanOrEqual(
                        byte_length_intptr,
                        IntPtrConstant(FixedTypedArrayBase::kMaxByteLength)));
@@ -831,24 +819,9 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
   BIND(&fast_c_call);
   {
-    // Overlapping backing stores of different element kinds are handled in
-    // runtime. We're a bit conservative here and bail to runtime if ranges
-    // overlap and element kinds differ.
-
-    TNode<IntPtrT> target_byte_length =
-        IntPtrMul(target_length, target_el_size);
     CSA_ASSERT(
-        this, UintPtrGreaterThanOrEqual(target_byte_length, IntPtrConstant(0)));
-
-    TNode<IntPtrT> target_data_end_ptr =
-        IntPtrAdd(target_data_ptr, target_byte_length);
-    TNode<IntPtrT> source_data_end_ptr =
-        IntPtrAdd(source_data_ptr, source_byte_length);
-
-    GotoIfNot(
-        Word32Or(UintPtrLessThanOrEqual(target_data_end_ptr, source_data_ptr),
-                 UintPtrLessThanOrEqual(source_data_end_ptr, target_data_ptr)),
-        call_runtime);
+        this, UintPtrGreaterThanOrEqual(
+                  IntPtrMul(target_length, target_el_size), IntPtrConstant(0)));
 
     TNode<IntPtrT> source_length =
         LoadAndUntagObjectField(source, JSTypedArray::kLengthOffset);
@@ -959,8 +932,8 @@ TF_BUILTIN(TypedArrayPrototypeSet, TypedArrayBuiltinsAssembler) {
 
   // Normalize offset argument (using ToInteger) and handle heap number cases.
   TNode<Object> offset = args.GetOptionalArgumentValue(1, SmiConstant(0));
-  TNode<Number> offset_num = ToInteger(context, offset, kTruncateMinusZero);
-  CSA_ASSERT(this, IsNumberNormalized(offset_num));
+  TNode<Number> offset_num =
+      ToInteger_Inline(context, offset, kTruncateMinusZero);
 
   // Since ToInteger always returns a Smi if the given value is within Smi
   // range, and the only corner case of -0.0 has already been truncated to 0.0,
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index f328268288..6af5eff357 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -47,6 +47,9 @@ class CodeAssemblerState;
   void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) {     \
     Name##Assembler assembler(state);                                       \
     state->SetInitialDebugInformation(#Name, __FILE__, __LINE__);           \
+    if (Builtins::KindOf(Builtins::k##Name) == Builtins::TFJ) {             \
+      assembler.PerformStackCheck(assembler.GetJSContextParameter());       \
+    }                                                                       \
    assembler.Generate##Name##Impl();                                        \
   }                                                                         \
   void Name##Assembler::Generate##Name##Impl()
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 27199c8462..3493e776b6 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -85,7 +85,8 @@ class BuiltinArguments : public Arguments {
   V8_NOINLINE static Object* Builtin_Impl_Stats_##name(                      \
       int args_length, Object** args_object, Isolate* isolate) {             \
     BuiltinArguments args(args_length, args_object);                         \
-    RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+    RuntimeCallTimerScope timer(isolate,                                     \
+                                RuntimeCallCounterId::kBuiltin_##name);      \
     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),                    \
                  "V8.Builtin_" #name);                                       \
     return Builtin_Impl_##name(args, isolate);                               \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 55fc1c8cd8..dc175e50b7 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -172,11 +172,25 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
 #undef CASE_OTHER
     case kArrayFilterLoopEagerDeoptContinuation:
     case kArrayFilterLoopLazyDeoptContinuation:
+    case kArrayEveryLoopEagerDeoptContinuation:
+    case kArrayEveryLoopLazyDeoptContinuation:
+    case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
+    case kArrayFindIndexLoopEagerDeoptContinuation:
+    case kArrayFindIndexLoopLazyDeoptContinuation:
+    case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+    case kArrayFindLoopEagerDeoptContinuation:
+    case kArrayFindLoopLazyDeoptContinuation:
     case kArrayForEach:
     case kArrayForEachLoopEagerDeoptContinuation:
     case kArrayForEachLoopLazyDeoptContinuation:
     case kArrayMapLoopEagerDeoptContinuation:
     case kArrayMapLoopLazyDeoptContinuation:
+    case kArrayReduceLoopEagerDeoptContinuation:
+    case kArrayReduceLoopLazyDeoptContinuation:
+    case kArrayReduceRightLoopEagerDeoptContinuation:
+    case kArrayReduceRightLoopLazyDeoptContinuation:
+    case kArraySomeLoopEagerDeoptContinuation:
+    case kArraySomeLoopLazyDeoptContinuation:
     case kConsoleAssert:
       return Callable(code, BuiltinDescriptor(isolate));
     default:
@@ -213,12 +227,30 @@ bool Builtins::IsLazy(int index) {
   // TODO(wasm): Remove wasm builtins once immovability is no longer required.
   switch (index) {
     case kAbort:  // Required by wasm.
+    case kArrayFindLoopEagerDeoptContinuation:  // https://crbug.com/v8/6786.
+    case kArrayFindLoopLazyDeoptContinuation:   // https://crbug.com/v8/6786.
+    // https://crbug.com/v8/6786.
+    case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+    // https://crbug.com/v8/6786.
+    case kArrayFindIndexLoopEagerDeoptContinuation:
+    // https://crbug.com/v8/6786.
+    case kArrayFindIndexLoopLazyDeoptContinuation:
+    // https://crbug.com/v8/6786.
+    case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
     case kArrayForEachLoopEagerDeoptContinuation:  // https://crbug.com/v8/6786.
     case kArrayForEachLoopLazyDeoptContinuation:   // https://crbug.com/v8/6786.
     case kArrayMapLoopEagerDeoptContinuation:      // https://crbug.com/v8/6786.
     case kArrayMapLoopLazyDeoptContinuation:       // https://crbug.com/v8/6786.
+    case kArrayEveryLoopEagerDeoptContinuation:    // https://crbug.com/v8/6786.
+    case kArrayEveryLoopLazyDeoptContinuation:     // https://crbug.com/v8/6786.
     case kArrayFilterLoopEagerDeoptContinuation:   // https://crbug.com/v8/6786.
     case kArrayFilterLoopLazyDeoptContinuation:    // https://crbug.com/v8/6786.
+    case kArrayReduceLoopEagerDeoptContinuation:   // https://crbug.com/v8/6786.
+    case kArrayReduceLoopLazyDeoptContinuation:    // https://crbug.com/v8/6786.
+    case kArrayReduceRightLoopEagerDeoptContinuation:
+    case kArrayReduceRightLoopLazyDeoptContinuation:
+    case kArraySomeLoopEagerDeoptContinuation:  // https://crbug.com/v8/6786.
+    case kArraySomeLoopLazyDeoptContinuation:   // https://crbug.com/v8/6786.
     case kCheckOptimizationMarker:
     case kCompileLazy:
     case kDeserializeLazy:
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 7635bada49..368e6670c1 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -211,13 +211,15 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   __ Push(esi);
   __ Push(ecx);
   __ Push(edi);
+  __ PushRoot(Heap::kTheHoleValueRootIndex);
   __ Push(edx);
 
   // ----------- S t a t e -------------
   //  -- sp[0*kPointerSize]: new target
-  //  -- edi and sp[1*kPointerSize]: constructor function
-  //  -- sp[2*kPointerSize]: argument count
-  //  -- sp[3*kPointerSize]: context
+  //  -- sp[1*kPointerSize]: padding
+  //  -- edi and sp[2*kPointerSize]: constructor function
+  //  -- sp[3*kPointerSize]: argument count
+  //  -- sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -237,10 +239,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // ----------- S t a t e -------------
   //  -- eax: implicit receiver
-  //  -- Slot 3 / sp[0*kPointerSize]: new target
-  //  -- Slot 2 / sp[1*kPointerSize]: constructor function
-  //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
-  //  -- Slot 0 / sp[3*kPointerSize]: context
+  //  -- Slot 4 / sp[0*kPointerSize]: new target
+  //  -- Slot 3 / sp[1*kPointerSize]: padding
+  //  -- Slot 2 / sp[2*kPointerSize]: constructor function
+  //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+  //  -- Slot 0 / sp[4*kPointerSize]: context
   // -----------------------------------
   // Deoptimizer enters here.
   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -260,9 +263,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   //  -- edx: new target
   //  -- sp[0*kPointerSize]: implicit receiver
   //  -- sp[1*kPointerSize]: implicit receiver
-  //  -- sp[2*kPointerSize]: constructor function
-  //  -- sp[3*kPointerSize]: number of arguments (tagged)
-  //  -- sp[4*kPointerSize]: context
+  //  -- sp[2*kPointerSize]: padding
+  //  -- sp[3*kPointerSize]: constructor function
+  //  -- sp[4*kPointerSize]: number of arguments (tagged)
+  //  -- sp[5*kPointerSize]: context
   // -----------------------------------
 
   // Restore constructor function and argument count.
@@ -283,9 +287,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   //  -- ecx: counter (tagged)
   //  -- sp[0*kPointerSize]: implicit receiver
   //  -- sp[1*kPointerSize]: implicit receiver
-  //  -- edi and sp[2*kPointerSize]: constructor function
-  //  -- sp[3*kPointerSize]: number of arguments (tagged)
-  //  -- sp[4*kPointerSize]: context
+  //  -- sp[2*kPointerSize]: padding
+  //  -- edi and sp[3*kPointerSize]: constructor function
+  //  -- sp[4*kPointerSize]: number of arguments (tagged)
+  //  -- sp[5*kPointerSize]: context
   // -----------------------------------
   __ jmp(&entry, Label::kNear);
   __ bind(&loop);
@@ -301,9 +306,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   // ----------- S t a t e -------------
   //  -- eax: constructor result
   //  -- sp[0*kPointerSize]: implicit receiver
-  //  -- sp[1*kPointerSize]: constructor function
-  //  -- sp[2*kPointerSize]: number of arguments
-  //  -- sp[3*kPointerSize]: context
+  //  -- sp[1*kPointerSize]: padding
+  //  -- sp[2*kPointerSize]: constructor function
+  //  -- sp[3*kPointerSize]: number of arguments
+  //  -- sp[4*kPointerSize]: context
   // -----------------------------------
 
   // Store offset of return address for deoptimizer.
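
[Editorial aside — not part of the commit] The construct-stub hunks above rework the frame layout (threading a padding slot, here the-hole, through every state comment) but preserve the result protocol the tail of the stub implements: an object returned from the constructor replaces the implicit receiver, while any other return value is ignored. A sketch of that observable behavior (TypeScript; the `any` casts only appease the type checker for `new` on plain functions):

    function ReturnsPrimitive(this: any) {
      this.tagged = true;
      return 42; // primitive return values are ignored by `new`
    }
    function ReturnsObject(this: any) {
      this.tagged = true;
      return { replaced: true }; // object return values win
    }
    console.log(new (ReturnsPrimitive as any)().tagged);  // true
    console.log(new (ReturnsObject as any)().replaced);   // true
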
@@ -572,7 +578,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
     __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
     __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
-    __ Assert(equal, kMissingBytecodeArray);
+    __ Assert(equal, AbortReason::kMissingBytecodeArray);
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -694,6 +700,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     __ j(equal, &fallthrough);
 
     TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                  OptimizationMarker::kLogFirstExecution,
+                                  Runtime::kFunctionFirstExecution);
+    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                   OptimizationMarker::kCompileOptimized,
                                   Runtime::kCompileOptimized_NotConcurrent);
     TailCallRuntimeIfMarkerEquals(
@@ -708,7 +717,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
       __ cmp(
           optimized_code_entry,
          Immediate(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
-      __ Assert(equal, kExpectedOptimizationSentinel);
+      __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
     }
     __ jmp(&fallthrough);
   }
@@ -791,7 +800,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ add(bytecode_size_table,
          Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
 
   // Load the size of the current bytecode.
   __ bind(&load_size);
@@ -852,7 +860,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      eax);
-    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        equal,
+        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Reset code age.
@@ -1239,7 +1249,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      ebx);
-    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        equal,
+        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Get the target bytecode offset from the frame.
@@ -1300,7 +1312,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   // The feedback vector must be defined.
   if (FLAG_debug_code) {
     __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
-    __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+    __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
   }
 
   // Is there an optimization marker or optimized code in the feedback vector?
@@ -1818,9 +1830,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a nullptr and a Smi.
     __ test(ebx, Immediate(kSmiTagMask));
-    __ Assert(not_zero, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(not_zero,
+              AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
     __ CmpObjectType(ebx, MAP_TYPE, ecx);
-    __ Assert(equal, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(equal,
+              AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -1847,9 +1861,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a nullptr and a Smi.
     __ test(ebx, Immediate(kSmiTagMask));
-    __ Assert(not_zero, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(not_zero, AbortReason::kUnexpectedInitialMapForArrayFunction);
     __ CmpObjectType(ebx, MAP_TYPE, ecx);
-    __ Assert(equal, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
   }
 
   // Run the native code for the Array function called as a normal function.
@@ -1875,6 +1889,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   STATIC_ASSERT(kSmiTagSize == 1);
   __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
   __ push(edi);
+
+  __ Push(Immediate(0));  // Padding.
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1980,7 +1996,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
     __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-              Immediate(1 << Map::kIsConstructor));
+              Immediate(Map::IsConstructorBit::kMask));
     __ j(not_zero, &new_target_constructor, Label::kNear);
     __ bind(&new_target_not_constructor);
     {
@@ -2294,7 +2310,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target is a proxy and call CallProxy external builtin
   __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsCallable));
+            Immediate(Map::IsCallableBit::kMask));
   __ j(zero, &non_callable);
 
   // Call CallProxy external builtin
@@ -2389,7 +2405,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsConstructor));
+            Immediate(Map::IsConstructorBit::kMask));
   __ j(zero, &non_constructor, Label::kNear);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2464,19 +2480,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edx    : message as String object
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ PopReturnAddressTo(ecx);
-  __ Push(edx);
-  __ PushReturnAddressFrom(ecx);
-  __ Move(esi, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 167bc1b829..7af02bb32e 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin InternalArray functions should be maps.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(a2, t0);
-    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, t0,
-              Operand(zero_reg));
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              t0, Operand(zero_reg));
     __ GetObjectType(a2, a3, t0);
-    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, t0,
-              Operand(MAP_TYPE));
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              t0, Operand(MAP_TYPE));
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin Array functions should be maps.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(a2, t0);
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, t0,
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
               Operand(zero_reg));
     __ GetObjectType(a2, a3, t0);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, t0,
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
               Operand(MAP_TYPE));
   }
 
@@ -271,13 +271,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // Preserve the incoming parameters on the stack.
   __ SmiTag(a0);
-  __ Push(cp, a0, a1, a3);
+  __ Push(cp, a0, a1);
+  __ PushRoot(Heap::kTheHoleValueRootIndex);
+  __ Push(a3);
 
   // ----------- S t a t e -------------
   //  --        sp[0*kPointerSize]: new target
-  //  -- a1 and sp[1*kPointerSize]: constructor function
-  //  --        sp[2*kPointerSize]: number of arguments (tagged)
-  //  --        sp[3*kPointerSize]: context
+  //  --        sp[1*kPointerSize]: padding
+  //  -- a1 and sp[2*kPointerSize]: constructor function
+  //  --        sp[3*kPointerSize]: number of arguments (tagged)
+  //  --        sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -298,10 +301,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
     // ----------- S t a t e -------------
     //  --                          v0: receiver
-    //  -- Slot 3 / sp[0*kPointerSize]: new target
-    //  -- Slot 2 / sp[1*kPointerSize]: constructor function
-    //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
-    //  -- Slot 0 / sp[3*kPointerSize]: context
+    //  -- Slot 4 / sp[0*kPointerSize]: new target
+    //  -- Slot 3 / sp[1*kPointerSize]: padding
+    //  -- Slot 2 / sp[2*kPointerSize]: constructor function
+    //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+    //  -- Slot 0 / sp[4*kPointerSize]: context
     // -----------------------------------
     // Deoptimizer enters here.
     masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -319,9 +323,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 r3: new target
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
 
     // Restore constructor function and argument count.
@@ -342,9 +347,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 t3: counter
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- a1 and sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- a1 and sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
     __ jmp(&entry);
     __ bind(&loop);
@@ -362,9 +368,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     // ----------- S t a t e -------------
     //  --                 v0: constructor result
     //  -- sp[0*kPointerSize]: implicit receiver
-    //  -- sp[1*kPointerSize]: constructor function
-    //  -- sp[2*kPointerSize]: number of arguments
-    //  -- sp[3*kPointerSize]: context
+    //  -- sp[1*kPointerSize]: padding
+    //  -- sp[2*kPointerSize]: constructor function
+    //  -- sp[3*kPointerSize]: number of arguments
+    //  -- sp[4*kPointerSize]: context
     // -----------------------------------
 
     // Store offset of return address for deoptimizer.
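
Note: the extra PushRoot plus the renumbered state comments add one "padding" slot to the fixed part of the construct-stub frame on every port. The likely motivation (an assumption on my part, consistent with the companion frame-padding changes in this V8 series) is keeping the fixed frame an even number of pointer slots so 16-byte stack alignment can hold on 64-bit targets. A back-of-the-envelope check of the new layout, assuming kPointerSize == 8:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;  // assumed 64-bit port
      const char* slots[] = {"new target", "padding", "constructor function",
                             "number of arguments (tagged)", "context"};
      const int n = sizeof(slots) / sizeof(slots[0]);
      for (int i = 0; i < n; ++i)
        std::printf("sp[%d*kPointerSize] = sp+%2d: %s\n", i, i * kPointerSize,
                    slots[i]);
      // Five fixed slots: together with the implicit receiver pushed later
      // this keeps the frame size a multiple of 16 bytes.
    }
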
@@ -635,7 +642,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
     __ GetObjectType(a3, a3, a3);
-    __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+              Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
             Operand(Smi::FromEnum(OptimizationMarker::kNone)));
 
   TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
   TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // that an interrupt will eventually update the slot with optimized code.
   if (FLAG_debug_code) {
     __ Assert(
-        eq, kExpectedOptimizationSentinel, optimized_code_entry,
+        eq, AbortReason::kExpectedOptimizationSentinel,
+        optimized_code_entry,
         Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
   }
   __ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ lbu(bytecode, MemOperand(scratch2));
   __ Addu(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
 
   // Load the size of the current bytecode.
   __ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
-              Operand(zero_reg));
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              t0, Operand(zero_reg));
     __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
-              Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              t0, Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
     __ SmiTst(kInterpreterBytecodeArrayRegister, at);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
-              Operand(zero_reg));
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              at, Operand(zero_reg));
     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
-              Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              a1, Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
 
   // The feedback vector must be defined.
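
Note: the hunks above also teach MaybeTailCallOptimizedCodeSlot a new marker, kLogFirstExecution, which tail-calls Runtime::kFunctionFirstExecution before the existing compile-optimized cases. A hedged, host-side sketch of the dispatch that the assembly implements (names mirror the diff, control flow simplified to a switch):

    #include <cstdio>

    enum class OptimizationMarker {
      kNone,
      kLogFirstExecution,  // new in this update
      kCompileOptimized,
      kCompileOptimizedConcurrent,
      kInOptimizationQueue,
    };

    void Dispatch(OptimizationMarker marker) {
      switch (marker) {
        case OptimizationMarker::kNone:
          break;  // fall through to the unoptimized entry
        case OptimizationMarker::kLogFirstExecution:
          std::puts("tail-call Runtime::kFunctionFirstExecution");
          break;
        case OptimizationMarker::kCompileOptimized:
          std::puts("tail-call Runtime::kCompileOptimized_NotConcurrent");
          break;
        case OptimizationMarker::kCompileOptimizedConcurrent:
          std::puts("tail-call Runtime::kCompileOptimized_Concurrent");
          break;
        case OptimizationMarker::kInOptimizationQueue:
          std::puts("leave the marker; optimization is already queued");
          break;
      }
    }

    int main() { Dispatch(OptimizationMarker::kLogFirstExecution); }
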
   if (FLAG_debug_code) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
               Operand(at));
   }
 
@@ -1804,8 +1819,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ sll(a0, a0, kSmiTagSize);
   __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
-  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                          kPointerSize));
+  __ Push(Smi::kZero);  // Padding.
+  __ Addu(fp, sp,
+          Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1814,8 +1830,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   // -----------------------------------
   // Get the number of arguments passed (as a smi), tear down the frame and
   // then tear down the parameters.
-  __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
-                             kPointerSize)));
+  __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ mov(sp, fp);
   __ MultiPop(fp.bit() | ra.bit());
   __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
@@ -1891,7 +1906,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ JumpIfSmi(a3, &new_target_not_constructor);
   __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
   __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+  __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
   __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
   __ bind(&new_target_not_constructor);
   {
@@ -2165,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target has a [[Call]] internal method.
   __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
   __ Branch(&non_callable, eq, t1, Operand(zero_reg));
 
   // Check if target is a proxy and call CallProxy external builtin
@@ -2321,7 +2336,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+  __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
   __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
 
   // Only dispatch to bound functions after checking whether they are
@@ -2389,17 +2404,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0 : message as String object
-  //  -- ra : return address
-  // -----------------------------------
-  __ Push(a0);
-  __ Move(cp, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
@@ -2489,8 +2493,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ sll(t2, a2, kPointerSizeLog2);
     __ Subu(t1, fp, Operand(t2));
     // Adjust for frame.
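
Note: the adaptor-frame hunks above replace inline offset arithmetic (StandardFrameConstants::kFixedFrameSizeFromFp plus hand-counted kPointerSize terms) with named constants on an ArgumentsAdaptorFrameConstants class, which also accounts for the new padding slot in one place. A sketch with illustrative values only; the real constants live in V8's frame-constants headers and differ per architecture:

    #include <cassert>

    constexpr int kPointerSize = 8;  // assumed 64-bit port

    struct StandardFrameConstants {
      static constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;
    };

    struct ArgumentsAdaptorFrameConstants {
      // The saved argc slot keeps its old offset below fp...
      static constexpr int kLengthOffset =
          -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
      // ...while the fixed frame grows by the argc slot plus the padding slot.
      static constexpr int kFixedFrameSizeFromFp =
          StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
    };

    int main() {
      // The old code recomputed these at every call site; the named
      // constants encode the padding slot exactly once.
      assert(ArgumentsAdaptorFrameConstants::kLengthOffset == -24);
      assert(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp == 32);
    }
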
-    __ Subu(t1, t1, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                            2 * kPointerSize));
+    __ Subu(t1, t1,
+            Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+                    kPointerSize));
 
     Label fill;
     __ bind(&fill);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 811ae637ad..266393070c 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -110,11 +110,11 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin InternalArray functions should be maps.
     __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(a2, a4);
-    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
-              Operand(zero_reg));
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              a4, Operand(zero_reg));
     __ GetObjectType(a2, a3, a4);
-    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, a4,
-              Operand(MAP_TYPE));
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              a4, Operand(MAP_TYPE));
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -139,10 +139,10 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin Array functions should be maps.
     __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
     __ SmiTst(a2, a4);
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
              Operand(zero_reg));
     __ GetObjectType(a2, a3, a4);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, a4,
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
              Operand(MAP_TYPE));
   }
 
@@ -273,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // Preserve the incoming parameters on the stack.
   __ SmiTag(a0);
-  __ Push(cp, a0, a1, a3);
+  __ Push(cp, a0, a1);
+  __ PushRoot(Heap::kTheHoleValueRootIndex);
+  __ Push(a3);
 
   // ----------- S t a t e -------------
   //  --        sp[0*kPointerSize]: new target
-  //  -- a1 and sp[1*kPointerSize]: constructor function
-  //  --        sp[2*kPointerSize]: number of arguments (tagged)
-  //  --        sp[3*kPointerSize]: context
+  //  --        sp[1*kPointerSize]: padding
+  //  -- a1 and sp[2*kPointerSize]: constructor function
+  //  --        sp[3*kPointerSize]: number of arguments (tagged)
+  //  --        sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +303,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
     // ----------- S t a t e -------------
     //  --                          v0: receiver
-    //  -- Slot 3 / sp[0*kPointerSize]: new target
-    //  -- Slot 2 / sp[1*kPointerSize]: constructor function
-    //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
-    //  -- Slot 0 / sp[3*kPointerSize]: context
+    //  -- Slot 4 / sp[0*kPointerSize]: new target
+    //  -- Slot 3 / sp[1*kPointerSize]: padding
+    //  -- Slot 2 / sp[2*kPointerSize]: constructor function
+    //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+    //  -- Slot 0 / sp[4*kPointerSize]: context
     // -----------------------------------
     // Deoptimizer enters here.
     masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +325,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 r3: new target
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
 
     // Restore constructor function and argument count.
@@ -344,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 t3: counter
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- a1 and sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- a1 and sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
     __ jmp(&entry);
     __ bind(&loop);
@@ -364,9 +370,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     // ----------- S t a t e -------------
     //  --                 v0: constructor result
     //  -- sp[0*kPointerSize]: implicit receiver
-    //  -- sp[1*kPointerSize]: constructor function
-    //  -- sp[2*kPointerSize]: number of arguments
-    //  -- sp[3*kPointerSize]: context
+    //  -- sp[1*kPointerSize]: padding
+    //  -- sp[2*kPointerSize]: constructor function
+    //  -- sp[3*kPointerSize]: number of arguments
+    //  -- sp[4*kPointerSize]: context
     // -----------------------------------
 
     // Store offset of return address for deoptimizer.
@@ -526,7 +533,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
     __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
     __ GetObjectType(a3, a3, a3);
-    __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+              Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -752,6 +760,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
             Operand(Smi::FromEnum(OptimizationMarker::kNone)));
 
   TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
   TailCallRuntimeIfMarkerEquals(
@@ -764,7 +775,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // that an interrupt will eventually update the slot with optimized code.
   if (FLAG_debug_code) {
     __ Assert(
-        eq, kExpectedOptimizationSentinel, optimized_code_entry,
+        eq, AbortReason::kExpectedOptimizationSentinel,
+        optimized_code_entry,
        Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
   }
   __ jmp(&fallthrough);
@@ -843,7 +855,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Lbu(bytecode, MemOperand(scratch2));
   __ Daddu(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
 
   // Load the size of the current bytecode.
   __ bind(&load_size);
@@ -907,11 +918,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
-              Operand(zero_reg));
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              a4, Operand(zero_reg));
     __ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
-              Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              a4, Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Reset code age.
@@ -1189,11 +1202,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
     __ SmiTst(kInterpreterBytecodeArrayRegister, at);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
-              Operand(zero_reg));
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              at, Operand(zero_reg));
     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
-              Operand(BYTECODE_ARRAY_TYPE));
+    __ Assert(eq,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              a1, Operand(BYTECODE_ARRAY_TYPE));
   }
 
   // Get the target bytecode offset from the frame.
@@ -1257,7 +1272,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   // The feedback vector must be defined.
   if (FLAG_debug_code) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Assert(ne, BailoutReason::kExpectedFeedbackVector, feedback_vector,
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
               Operand(at));
   }
 
@@ -1820,8 +1835,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ dsll32(a0, a0, 0);
   __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
-  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                           kPointerSize));
+  __ Push(Smi::kZero);  // Padding.
+  __ Daddu(fp, sp,
+           Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1830,8 +1846,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   // -----------------------------------
   // Get the number of arguments passed (as a smi), tear down the frame and
   // then tear down the parameters.
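
Note: across all ports, AdvanceBytecodeOffset drops a `jmp(&load_size)` whose target was the label bound on the very next line, so straight-line fall-through is equivalent and one instruction shorter per dispatch. A trivial C++ analogue of the removed pattern:

    #include <cstdio>

    int main() {
      int table = 0;
      table += 2;      // switch to the extra-wide bytecode size table
      goto load_size;  // the removed jump: its target is the next statement
    load_size:
      std::printf("table offset = %d\n", table);
    }
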
-  __ Ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
-                             kPointerSize)));
+  __ Ld(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ mov(sp, fp);
   __ MultiPop(fp.bit() | ra.bit());
   __ SmiScale(a4, a1, kPointerSizeLog2);
@@ -1915,7 +1930,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ JumpIfSmi(a3, &new_target_not_constructor);
   __ ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
   __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsConstructor));
+  __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
   __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
   __ bind(&new_target_not_constructor);
   {
@@ -2187,7 +2202,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target has a [[Call]] internal method.
   __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
   __ Branch(&non_callable, eq, t1, Operand(zero_reg));
 
   __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
@@ -2340,7 +2355,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+  __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
   __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
 
   // Only dispatch to bound functions after checking whether they are
@@ -2408,17 +2423,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0 : message as String object
-  //  -- ra : return address
-  // -----------------------------------
-  __ Push(a0);
-  __ Move(cp, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // State setup as expected by MacroAssembler::InvokePrologue.
   // ----------- S t a t e -------------
@@ -2510,8 +2514,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ dsll(a6, a2, kPointerSizeLog2);
     __ Dsubu(a4, fp, Operand(a6));
     // Adjust for frame.
-    __ Dsubu(a4, a4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                             2 * kPointerSize));
+    __ Dsubu(a4, a4,
+             Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+                     kPointerSize));
 
     Label fill;
     __ bind(&fill);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index e0db87cc0c..34da70ff0f 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin InternalArray functions should be maps.
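
Note: every port's hand-written Generate_AbortJS stub is deleted in this update with no platform replacement visible in these files. Presumably (an assumption, not confirmed by this diff) the builtin was reimplemented once in platform-independent CodeStubAssembler code. A self-contained stand-in showing only the behavior the deleted stubs shared, with V8's internals replaced by stubs so it compiles:

    #include <cstdio>

    // Stand-ins for Runtime::kAbortJS and the builtin plumbing; in V8 the
    // real replacement would be CSA-generated, not plain C++.
    namespace runtime {
    void AbortJS(const char* message) { std::printf("abort: %s\n", message); }
    }  // namespace runtime

    void AbortJSBuiltin(const char* message) {  // hypothetical single version
      runtime::AbortJS(message);  // tail-call into the runtime, as the
                                  // per-architecture stubs used to do
    }

    int main() { AbortJSBuiltin("unreachable code reached"); }
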
     __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
     __ TestIfSmi(r5, r0);
-    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              cr0);
     __ CompareObjectType(r5, r6, r7, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin Array functions should be maps.
     __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
     __ TestIfSmi(r5, r0);
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
     __ CompareObjectType(r5, r6, r7, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
   }
 
   __ mr(r6, r4);
@@ -278,13 +279,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // Preserve the incoming parameters on the stack.
   __ SmiTag(r3);
-  __ Push(cp, r3, r4, r6);
+  __ Push(cp, r3, r4);
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+  __ Push(r6);
 
   // ----------- S t a t e -------------
   //  --        sp[0*kPointerSize]: new target
-  //  -- r4 and sp[1*kPointerSize]: constructor function
-  //  --        sp[2*kPointerSize]: number of arguments (tagged)
-  //  --        sp[3*kPointerSize]: context
+  //  --        sp[1*kPointerSize]: padding
+  //  -- r4 and sp[2*kPointerSize]: constructor function
+  //  --        sp[3*kPointerSize]: number of arguments (tagged)
+  //  --        sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -305,10 +309,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
     // ----------- S t a t e -------------
     //  --                          r3: receiver
-    //  -- Slot 3 / sp[0*kPointerSize]: new target
-    //  -- Slot 2 / sp[1*kPointerSize]: constructor function
-    //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
-    //  -- Slot 0 / sp[3*kPointerSize]: context
+    //  -- Slot 4 / sp[0*kPointerSize]: new target
+    //  -- Slot 3 / sp[1*kPointerSize]: padding
+    //  -- Slot 2 / sp[2*kPointerSize]: constructor function
+    //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+    //  -- Slot 0 / sp[4*kPointerSize]: context
     // -----------------------------------
     // Deoptimizer enters here.
     masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -326,9 +331,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 r6: new target
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
 
     // Restore constructor function and argument count.
@@ -348,9 +354,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                cr0: condition indicating whether r3 is zero
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- r4 and sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- r4 and sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
     __ beq(&no_args, cr0);
     __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
@@ -373,9 +380,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     // ----------- S t a t e -------------
     //  --                 r0: constructor result
     //  -- sp[0*kPointerSize]: implicit receiver
-    //  -- sp[1*kPointerSize]: constructor function
-    //  -- sp[2*kPointerSize]: number of arguments
-    //  -- sp[3*kPointerSize]: context
+    //  -- sp[1*kPointerSize]: padding
+    //  -- sp[2*kPointerSize]: constructor function
+    //  -- sp[3*kPointerSize]: number of arguments
+    //  -- sp[4*kPointerSize]: context
     // -----------------------------------
 
     // Store offset of return address for deoptimizer.
@@ -545,7 +553,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
     __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kMissingBytecodeArray);
+    __ Assert(eq, AbortReason::kMissingBytecodeArray);
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -636,8 +644,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ mov(cp, Operand(context_address));
   __ LoadP(cp, MemOperand(cp));
 
-  __ InitializeRootRegister();
-
   // Push the function and the receiver onto the stack.
   __ Push(r4, r5);
 
@@ -773,6 +779,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ beq(&fallthrough);
 
   TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
   TailCallRuntimeIfMarkerEquals(
@@ -787,7 +796,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     __ CmpSmiLiteral(
         optimized_code_entry,
         Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
-    __ Assert(eq, kExpectedOptimizationSentinel);
+    __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
   }
   __ b(&fallthrough);
 }
@@ -868,11 +877,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ addi(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ b(&load_size);
 
   // Load the size of the current bytecode.
   __ bind(&load_size);
-
   __ ShiftLeftImm(scratch2, bytecode, Operand(2));
   __ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
   __ add(bytecode_offset, bytecode_offset, scratch2);
@@ -941,10 +948,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              cr0);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
                          BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Reset code age.
@@ -1226,10 +1236,13 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
+    __ Assert(ne,
+              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+              cr0);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
                          BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Get the target bytecode offset from the frame.
@@ -1291,7 +1304,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   // The feedback vector must be defined.
   if (FLAG_debug_code) {
     __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
-    __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
   }
 
   // Is there an optimization marker or optimized code in the feedback vector?
@@ -1867,8 +1880,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   } else {
     __ Push(fp, r7, r4, r3);
   }
-  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                          kPointerSize));
+  __ Push(Smi::kZero);  // Padding.
+  __ addi(fp, sp,
+          Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1877,8 +1891,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   // -----------------------------------
   // Get the number of arguments passed (as a smi), tear down the frame and
   // then tear down the parameters.
-  __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
-                                kPointerSize)));
+  __ LoadP(r4, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   int stack_adjustment = kPointerSize;  // adjust for receiver
   __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
   __ SmiToPtrArrayOffset(r0, r4);
@@ -1956,7 +1969,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ JumpIfSmi(r6, &new_target_not_constructor);
   __ LoadP(scratch, FieldMemOperand(r6, HeapObject::kMapOffset));
   __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-  __ TestBit(scratch, Map::kIsConstructor, r0);
+  __ TestBit(scratch, Map::IsConstructorBit::kShift, r0);
   __ bne(&new_target_constructor, cr0);
   __ bind(&new_target_not_constructor);
   {
@@ -2253,7 +2266,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target has a [[Call]] internal method.
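
Note: on ppc and s390 the bit-field checks pass Map::IsConstructorBit::kShift rather than kMask, because TestBit-style helpers take a bit index while the test/And-style instructions on the other ports take a mask. Both conventions test the same bit; a small equivalence check:

    #include <cassert>
    #include <cstdint>

    // kShift and kMask for the same (hypothetical) one-bit field.
    constexpr int kShift = 1;
    constexpr uint32_t kMask = uint32_t{1} << kShift;

    bool TestBitStyle(uint32_t bits) { return (bits >> kShift) & 1; }  // ppc/s390
    bool MaskStyle(uint32_t bits) { return (bits & kMask) != 0; }      // ia32/x64/mips

    int main() {
      for (uint32_t bits : {0u, 2u, 3u, 4u})
        assert(TestBitStyle(bits) == MaskStyle(bits));
    }
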
   __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
-  __ TestBit(r7, Map::kIsCallable, r0);
+  __ TestBit(r7, Map::IsCallableBit::kShift, r0);
   __ beq(&non_callable, cr0);
 
   // Check if target is a proxy and call CallProxy external builtin
@@ -2349,7 +2362,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
-  __ TestBit(r5, Map::kIsConstructor, r0);
+  __ TestBit(r5, Map::IsConstructorBit::kShift, r0);
   __ beq(&non_constructor, cr0);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2419,17 +2432,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4 : message as String object
-  //  -- lr : return address
-  // -----------------------------------
-  __ push(r4);
-  __ LoadSmiLiteral(cp, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3 : actual number of arguments
@@ -2524,8 +2526,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
     __ sub(r7, fp, r7);
     // Adjust for frame.
-    __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                            2 * kPointerSize));
+    __ subi(r7, r7,
+            Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+                    kPointerSize));
 
     Label fill;
     __ bind(&fill);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 42c478bd42..020b04b91d 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -109,9 +109,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin InternalArray functions should be maps.
     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
     __ TestIfSmi(r4);
-    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
+              cr0);
     __ CompareObjectType(r4, r5, r6, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -136,9 +137,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     // Initial map for the builtin Array functions should be maps.
     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
     __ TestIfSmi(r4);
-    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
     __ CompareObjectType(r4, r5, r6, MAP_TYPE);
-    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+    __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
   }
 
   __ LoadRR(r5, r3);
@@ -272,13 +273,16 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // Preserve the incoming parameters on the stack.
   __ SmiTag(r2);
-  __ Push(cp, r2, r3, r5);
+  __ Push(cp, r2, r3);
+  __ PushRoot(Heap::kUndefinedValueRootIndex);
+  __ Push(r5);
 
   // ----------- S t a t e -------------
   //  --        sp[0*kPointerSize]: new target
-  //  -- r3 and sp[1*kPointerSize]: constructor function
-  //  --        sp[2*kPointerSize]: number of arguments (tagged)
-  //  --        sp[3*kPointerSize]: context
+  //  --        sp[1*kPointerSize]: padding
+  //  -- r3 and sp[2*kPointerSize]: constructor function
+  //  --        sp[3*kPointerSize]: number of arguments (tagged)
+  //  --        sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -300,10 +304,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
     // ----------- S t a t e -------------
     //  --                          r2: receiver
-    //  -- Slot 3 / sp[0*kPointerSize]: new target
-    //  -- Slot 2 / sp[1*kPointerSize]: constructor function
-    //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
-    //  -- Slot 0 / sp[3*kPointerSize]: context
+    //  -- Slot 4 / sp[0*kPointerSize]: new target
+    //  -- Slot 3 / sp[1*kPointerSize]: padding
+    //  -- Slot 2 / sp[2*kPointerSize]: constructor function
+    //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+    //  -- Slot 0 / sp[4*kPointerSize]: context
    // -----------------------------------
    // Deoptimizer enters here.
    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -321,9 +326,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                 r5: new target
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
 
     // Restore constructor function and argument count.
@@ -343,9 +349,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     //  --                cr0: condition indicating whether r2 is zero
     //  -- sp[0*kPointerSize]: implicit receiver
     //  -- sp[1*kPointerSize]: implicit receiver
-    //  -- r3 and sp[2*kPointerSize]: constructor function
-    //  -- sp[3*kPointerSize]: number of arguments (tagged)
-    //  -- sp[4*kPointerSize]: context
+    //  -- sp[2*kPointerSize]: padding
+    //  -- r3 and sp[3*kPointerSize]: constructor function
+    //  -- sp[4*kPointerSize]: number of arguments (tagged)
+    //  -- sp[5*kPointerSize]: context
     // -----------------------------------
     __ beq(&no_args);
 
@@ -366,9 +373,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
     // ----------- S t a t e -------------
     //  --                 r0: constructor result
     //  -- sp[0*kPointerSize]: implicit receiver
-    //  -- sp[1*kPointerSize]: constructor function
-    //  -- sp[2*kPointerSize]: number of arguments
-    //  -- sp[3*kPointerSize]: context
+    //  -- sp[1*kPointerSize]: padding
+    //  -- sp[2*kPointerSize]: constructor function
+    //  -- sp[3*kPointerSize]: number of arguments
+    //  -- sp[4*kPointerSize]: context
     // -----------------------------------
 
     // Store offset of return address for deoptimizer.
@@ -540,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
     __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kMissingBytecodeArray);
+    __ Assert(eq, AbortReason::kMissingBytecodeArray);
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -632,8 +640,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ mov(cp, Operand(context_address));
   __ LoadP(cp, MemOperand(cp));
 
-  __ InitializeRootRegister();
-
   // Push the function and the receiver onto the stack.
   __ Push(r3, r4);
 
@@ -776,6 +782,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ beq(&fallthrough);
 
   TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
   TailCallRuntimeIfMarkerEquals(
@@ -790,7 +799,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     __ CmpSmiLiteral(
         optimized_code_entry,
         Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
-    __ Assert(eq, kExpectedOptimizationSentinel);
+    __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
   }
   __ b(&fallthrough, Label::kNear);
 }
@@ -870,10 +879,9 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ AddP(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ b(&load_size);
+
   // Load the size of the current bytecode.
   __ bind(&load_size);
-
   __ ShiftLeftP(scratch2, bytecode, Operand(2));
   __ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
   __ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -938,10 +946,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
                          BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Reset code age.
@@ -1224,10 +1234,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
                          BYTECODE_ARRAY_TYPE);
-    __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }
 
   // Get the target bytecode offset from the frame.
@@ -1289,7 +1301,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
 
   // The feedback vector must be defined.
   if (FLAG_debug_code) {
     __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
-    __ Assert(ne, BailoutReason::kExpectedFeedbackVector);
+    __ Assert(ne, AbortReason::kExpectedFeedbackVector);
   }
 
   // Is there an optimization marker or optimized code in the feedback vector?
@@ -1854,7 +1866,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   //   Old FP <--- New FP
   //   Argument Adapter SMI
   //   Function
-  //   ArgC as SMI <--- New SP
+  //   ArgC as SMI
+  //   Padding <--- New SP
   __ lay(sp, MemOperand(sp, -5 * kPointerSize));
 
   // Cleanse the top nibble of 31-bit pointers.
@@ -1864,8 +1877,9 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
   __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
   __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
-  __ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
-                           kPointerSize));
+  __ Push(Smi::kZero);  // Padding.
+  __ la(fp,
+        MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1874,8 +1888,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   // -----------------------------------
   // Get the number of arguments passed (as a smi), tear down the frame and
   // then tear down the parameters.
-  __ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
-                                kPointerSize)));
+  __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   int stack_adjustment = kPointerSize;  // adjust for receiver
   __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
   __ SmiToPtrArrayOffset(r3, r3);
@@ -1954,7 +1967,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ JumpIfSmi(r5, &new_target_not_constructor);
   __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
   __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-  __ tmll(scratch, Operand(Map::kIsConstructor));
+  __ tmll(scratch, Operand(Map::IsConstructorBit::kShift));
   __ bne(&new_target_constructor);
   __ bind(&new_target_not_constructor);
   {
@@ -2252,7 +2265,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target has a [[Call]] internal method.
   __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
-  __ TestBit(r6, Map::kIsCallable);
+  __ TestBit(r6, Map::IsCallableBit::kShift);
   __ beq(&non_callable);
 
   // Check if target is a proxy and call CallProxy external builtin
@@ -2348,7 +2361,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
-  __ TestBit(r4, Map::kIsConstructor);
+  __ TestBit(r4, Map::IsConstructorBit::kShift);
   __ beq(&non_constructor);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2418,17 +2431,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3 : message as String object
-  //  -- lr : return address
-  // -----------------------------------
-  __ push(r3);
-  __ LoadSmiLiteral(cp, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2 : actual number of arguments
@@ -2522,8 +2524,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
     __ SubP(r6, fp, r6);
     // Adjust for frame.
-    __ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
-                            2 * kPointerSize));
+    __ SubP(r6, r6,
+            Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
+                    kPointerSize));
 
     Label fill;
     __ bind(&fill);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index b9073e1f13..5a09658867 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -107,7 +107,11 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
   CanonicalHandleScope canonical(isolate);
-  Zone zone(isolate->allocator(), ZONE_NAME);
+
+  SegmentSize segment_size = isolate->serializer_enabled()
+                                 ? SegmentSize::kLarge
+                                 : SegmentSize::kDefault;
+  Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
   const int argc_with_recv =
       (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
   compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
@@ -127,7 +131,10 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
   // Canonicalize handles, so that we can share constant pool entries pointing
   // to code targets without dereferencing their handles.
   CanonicalHandleScope canonical(isolate);
-  Zone zone(isolate->allocator(), ZONE_NAME);
+  SegmentSize segment_size = isolate->serializer_enabled()
+                                 ? SegmentSize::kLarge
+                                 : SegmentSize::kDefault;
+  Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
   // The interface descriptor with given key must be initialized at this point
   // and this construction just queries the details from the descriptors table.
   CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f2820fa410..cd35abb362 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -170,7 +170,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     __ Push(Operand(rbx, rcx, times_pointer_size, 0));
     __ bind(&entry);
     __ decp(rcx);
-    __ j(greater_equal, &loop);
+    __ j(greater_equal, &loop, Label::kNear);
 
     // Call the function.
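
Note: the setup-builtins-internal.cc hunks pick a larger zone segment size while the serializer is enabled, presumably to cut per-segment allocation overhead when every builtin is compiled in one go for the snapshot. A schematic model of the choice; SegmentSize and the ternary mirror the diff, the byte counts below are invented:

    #include <cstddef>
    #include <cstdio>

    enum class SegmentSize { kDefault, kLarge };

    constexpr size_t SegmentBytes(SegmentSize s) {
      return s == SegmentSize::kLarge ? size_t{1} << 20   // fewer, bigger blocks
                                      : size_t{8} << 10;  // default granularity
    }

    int main(int argc, char**) {
      bool serializer_enabled = argc > 1;  // stand-in for
                                           // isolate->serializer_enabled()
      SegmentSize segment_size =
          serializer_enabled ? SegmentSize::kLarge : SegmentSize::kDefault;
      std::printf("zone segment: %zu bytes\n", SegmentBytes(segment_size));
    }
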
   // rax: number of arguments (untagged)
@@ -217,19 +217,21 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   __ Push(rsi);
   __ Push(rcx);
   __ Push(rdi);
+  __ PushRoot(Heap::kTheHoleValueRootIndex);
   __ Push(rdx);
 
   // ----------- S t a t e -------------
   //  --         sp[0*kPointerSize]: new target
-  //  -- rdi and sp[1*kPointerSize]: constructor function
-  //  --         sp[2*kPointerSize]: argument count
-  //  --         sp[3*kPointerSize]: context
+  //  --         sp[1*kPointerSize]: padding
+  //  -- rdi and sp[2*kPointerSize]: constructor function
+  //  --         sp[3*kPointerSize]: argument count
+  //  --         sp[4*kPointerSize]: context
   // -----------------------------------
 
   __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
            Immediate(SharedFunctionInfo::kDerivedConstructorMask));
-  __ j(not_zero, &not_create_implicit_receiver);
+  __ j(not_zero, &not_create_implicit_receiver, Label::kNear);
 
   // If not derived class constructor: Allocate the new receiver object.
   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
@@ -243,10 +245,11 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
 
   // ----------- S t a t e -------------
   //  --                          rax  implicit receiver
-  //  -- Slot 3 / sp[0*kPointerSize]  new target
-  //  -- Slot 2 / sp[1*kPointerSize]  constructor function
-  //  -- Slot 1 / sp[2*kPointerSize]  number of arguments (tagged)
-  //  -- Slot 0 / sp[3*kPointerSize]  context
+  //  -- Slot 4 / sp[0*kPointerSize]  new target
+  //  -- Slot 3 / sp[1*kPointerSize]  padding
+  //  -- Slot 2 / sp[2*kPointerSize]  constructor function
+  //  -- Slot 1 / sp[3*kPointerSize]  number of arguments (tagged)
+  //  -- Slot 0 / sp[4*kPointerSize]  context
   // -----------------------------------
   // Deoptimizer enters here.
   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
@@ -265,9 +268,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   // ----------- S t a t e -------------
   //  -- sp[0*kPointerSize]  implicit receiver
   //  -- sp[1*kPointerSize]  implicit receiver
-  //  -- sp[2*kPointerSize]  constructor function
-  //  -- sp[3*kPointerSize]  number of arguments (tagged)
-  //  -- sp[4*kPointerSize]  context
+  //  -- sp[2*kPointerSize]  padding
+  //  -- sp[3*kPointerSize]  constructor function
+  //  -- sp[4*kPointerSize]  number of arguments (tagged)
+  //  -- sp[5*kPointerSize]  context
   // -----------------------------------
 
   // Restore constructor function and argument count.
@@ -288,16 +292,17 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   //  --                      rcx: counter (tagged)
   //  --         sp[0*kPointerSize]: implicit receiver
   //  --         sp[1*kPointerSize]: implicit receiver
-  //  -- rdi and sp[2*kPointerSize]: constructor function
-  //  --         sp[3*kPointerSize]: number of arguments (tagged)
-  //  --         sp[4*kPointerSize]: context
+  //  --         sp[2*kPointerSize]: padding
+  //  -- rdi and sp[3*kPointerSize]: constructor function
+  //  --         sp[4*kPointerSize]: number of arguments (tagged)
+  //  --         sp[5*kPointerSize]: context
   // -----------------------------------
   __ jmp(&entry, Label::kNear);
   __ bind(&loop);
   __ Push(Operand(rbx, rcx, times_pointer_size, 0));
   __ bind(&entry);
   __ decp(rcx);
-  __ j(greater_equal, &loop);
+  __ j(greater_equal, &loop, Label::kNear);
 
   // Call the function.
   ParameterCount actual(rax);
@@ -306,9 +311,10 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   // ----------- S t a t e -------------
   //  -- rax                 constructor result
   //  -- sp[0*kPointerSize]  implicit receiver
-  //  -- sp[1*kPointerSize]  constructor function
-  //  -- sp[2*kPointerSize]  number of arguments
-  //  -- sp[3*kPointerSize]  context
+  //  -- sp[1*kPointerSize]  padding
+  //  -- sp[2*kPointerSize]  constructor function
+  //  -- sp[3*kPointerSize]  number of arguments
+  //  -- sp[4*kPointerSize]  context
   // -----------------------------------
 
   // Store offset of return address for deoptimizer.
@@ -363,7 +369,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
   // on-stack receiver as the result.
   __ bind(&use_receiver);
   __ movp(rax, Operand(rsp, 0 * kPointerSize));
-  __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw);
+  __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw, Label::kNear);
 
   __ bind(&leave_frame);
   // Restore the arguments count.
@@ -519,7 +525,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // Argument count in rax. Clobbers rcx.
     Label enough_stack_space, stack_overflow;
     Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
-    __ jmp(&enough_stack_space);
+    __ jmp(&enough_stack_space, Label::kNear);
 
     __ bind(&stack_overflow);
     __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -540,7 +546,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     __ addp(rcx, Immediate(1));
     __ bind(&entry);
     __ cmpp(rcx, rax);
-    __ j(not_equal, &loop);
+    __ j(not_equal, &loop, Label::kNear);
 
     // Invoke the builtin code.
     Handle<Code> builtin = is_construct
@@ -642,7 +648,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
     __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
     __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
-    __ Assert(equal, kMissingBytecodeArray);
+    __ Assert(equal, AbortReason::kMissingBytecodeArray);
   }
 
   // Resume (Ignition/TurboFan) generator object.
@@ -768,6 +774,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ j(equal, &fallthrough);
 
   TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
+                                OptimizationMarker::kLogFirstExecution,
+                                Runtime::kFunctionFirstExecution);
+  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                 OptimizationMarker::kCompileOptimized,
                                 Runtime::kCompileOptimized_NotConcurrent);
   TailCallRuntimeIfMarkerEquals(
@@ -781,7 +790,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   if (FLAG_debug_code) {
     __ SmiCompare(optimized_code_entry,
                   Smi::FromEnum(OptimizationMarker::kInOptimizationQueue));
-    __ Assert(equal, kExpectedOptimizationSentinel);
+    __ Assert(equal, AbortReason::kExpectedOptimizationSentinel);
   }
   __ jmp(&fallthrough);
 }
@@ -859,7 +868,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ addp(bytecode_size_table,
           Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
 
   // Load the size of the current bytecode.
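
Note: the x64 hunks also sprinkle Label::kNear hints onto jumps whose targets are provably close. On x86-64 a conditional jump with a known short target can use the 2-byte rel8 encoding instead of the 6-byte rel32 form, so this is a pure code-size win. The counts below are from the ISA encoding tables, not from V8:

    #include <cstdio>

    int main() {
      struct { const char* form; int bytes; } jcc[] = {
          {"jcc rel8  (Label::kNear, target within +/-127 bytes)", 2},
          {"jcc rel32 (default when no hint is given)", 6},
      };
      for (auto& j : jcc) std::printf("%-54s %d bytes\n", j.form, j.bytes);
    }
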
   __ bind(&load_size);
@@ -922,7 +930,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      rax);
-    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        equal,
+        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Reset code age.
@@ -958,7 +968,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
     Label loop_header;
     Label loop_check;
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-    __ j(always, &loop_check);
+    __ j(always, &loop_check, Label::kNear);
     __ bind(&loop_header);
     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
     __ Push(rax);
@@ -1051,7 +1061,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
 
   // Push the arguments.
   Label loop_header, loop_check;
-  __ j(always, &loop_check);
+  __ j(always, &loop_check, Label::kNear);
   __ bind(&loop_header);
   __ Push(Operand(start_address, 0));
   __ subp(start_address, Immediate(kPointerSize));
@@ -1212,7 +1222,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
                      rbx);
-    __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(
+        equal,
+        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
   // Get the target bytecode offset from the frame.
@@ -1274,7 +1286,7 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   // The feedback vector must be defined.
   if (FLAG_debug_code) {
     __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
-    __ Assert(not_equal, BailoutReason::kExpectedFeedbackVector);
+    __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
   }
 
   // Is there an optimization marker or optimized code in the feedback vector?
@@ -1806,9 +1818,10 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
     // Will both indicate a nullptr and a Smi.
     STATIC_ASSERT(kSmiTag == 0);
     Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
-    __ Check(not_smi, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Check(not_smi,
+             AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Check(equal, kUnexpectedInitialMapForInternalArrayFunction);
+    __ Check(equal, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
   }
 
   // Run the native code for the InternalArray function called as a normal
@@ -1835,9 +1848,9 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
     // Will both indicate a nullptr and a Smi.
     STATIC_ASSERT(kSmiTag == 0);
     Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
-    __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
+    __ Check(not_smi, AbortReason::kUnexpectedInitialMapForArrayFunction);
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Check(equal, kUnexpectedInitialMapForArrayFunction);
+    __ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
   }
 
   __ movp(rdx, rdi);
@@ -1863,6 +1876,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   // arguments and the receiver.
   __ Integer32ToSmi(r8, rax);
   __ Push(r8);
+
+  __ Push(Immediate(0));  // Padding.
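
Note: the x64 array-constructor guards use Check rather than Assert, and the distinction survives the AbortReason migration. By V8 convention (stated here from general knowledge of the codebase, not from this diff), Assert is only emitted when FLAG_debug_code is set, while Check always guards, even in release builds. A host-side analogue, with FLAG_debug_code as a plain stand-in variable:

    #include <cstdio>
    #include <cstdlib>

    static bool FLAG_debug_code = false;  // stand-in for the real flag

    void Check(bool cond, const char* reason) {  // always active
      if (!cond) {
        std::fprintf(stderr, "abort: %s\n", reason);
        std::abort();
      }
    }

    void Assert(bool cond, const char* reason) {  // debug-code only
      if (FLAG_debug_code) Check(cond, reason);
    }

    int main() {
      Assert(false, "ignored unless debug code is on");  // no-op here
      Check(true, "unexpected initial map for Array function");
      std::puts("ok");
    }
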
 }
 
 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
@@ -1922,19 +1937,6 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
   __ TailCallRuntime(Runtime::kAbort);
 }
 
-// static
-void Builtins::Generate_AbortJS(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdx    : message as String object
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdx);
-  __ PushReturnAddressFrom(rcx);
-  __ Move(rsi, Smi::kZero);
-  __ TailCallRuntime(Runtime::kAbortJS);
-}
-
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax : actual number of arguments
@@ -2115,7 +2117,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
   __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsConstructor));
+           Immediate(Map::IsConstructorBit::kMask));
   __ j(not_zero, &new_target_constructor, Label::kNear);
   __ bind(&new_target_not_constructor);
   {
@@ -2242,7 +2244,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         // Patch receiver to global proxy.
         __ LoadGlobalProxy(rcx);
       }
-      __ jmp(&convert_receiver);
+      __ jmp(&convert_receiver, Label::kNear);
     }
     __ bind(&convert_to_object);
     {
@@ -2419,12 +2421,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
 
   // Check if target has a [[Call]] internal method.
   __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsCallable));
-  __ j(zero, &non_callable);
+           Immediate(Map::IsCallableBit::kMask));
+  __ j(zero, &non_callable, Label::kNear);
 
   // Check if target is a proxy and call CallProxy external builtin
   __ CmpInstanceType(rcx, JS_PROXY_TYPE);
-  __ j(not_equal, &non_function);
+  __ j(not_equal, &non_function, Label::kNear);
   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
 
   // 2. Call to something else, which might have a [[Call]] internal method (if
@@ -2516,7 +2518,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Check if target has a [[Construct]] internal method.
   __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsConstructor));
+           Immediate(Map::IsConstructorBit::kMask));
   __ j(zero, &non_constructor, Label::kNear);
 
   // Only dispatch to bound functions after checking whether they are
@@ -2527,7 +2529,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 
   // Only dispatch to proxies after checking whether they are constructors.
   __ CmpInstanceType(rcx, JS_PROXY_TYPE);
-  __ j(not_equal, &non_proxy);
+  __ j(not_equal, &non_proxy, Label::kNear);
   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
           RelocInfo::CODE_TARGET);
 
@@ -2568,7 +2570,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
   Label skip;
   // If the code object is null, just return to the caller.
-  __ cmpp(rax, Immediate(0));
+  __ testp(rax, rax);
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
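
Note: the final hunk swaps `cmpp(rax, Immediate(0))` for `testp(rax, rax)`. Testing a register against itself sets the zero flag exactly when the register is zero, just like comparing with an immediate 0, but the encoding is shorter because it carries no immediate operand. A quick flag-equivalence check:

    #include <cassert>
    #include <cstdint>

    int main() {
      // ZF after cmp x,0 vs. ZF after test x,x: identical for every x.
      for (int64_t x : {int64_t{0}, int64_t{1}, int64_t{-1}, int64_t{1} << 40}) {
        bool zf_cmp = (x - 0) == 0;   // cmpp(rax, Immediate(0))
        bool zf_test = (x & x) == 0;  // testp(rax, rax)
        assert(zf_cmp == zf_test);
      }
    }
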