Diffstat (limited to 'chromium/v8/src/builtins')
-rw-r--r--  chromium/v8/src/builtins/accessors.cc | 34
-rw-r--r--  chromium/v8/src/builtins/aggregate-error.tq | 7
-rw-r--r--  chromium/v8/src/builtins/arm/builtins-arm.cc | 52
-rw-r--r--  chromium/v8/src/builtins/arm64/builtins-arm64.cc | 58
-rw-r--r--  chromium/v8/src/builtins/arraybuffer.tq | 105
-rw-r--r--  chromium/v8/src/builtins/base.tq | 10
-rw-r--r--  chromium/v8/src/builtins/builtins-api.cc | 6
-rw-r--r--  chromium/v8/src/builtins/builtins-array-gen.cc | 1
-rw-r--r--  chromium/v8/src/builtins/builtins-array.cc | 65
-rw-r--r--  chromium/v8/src/builtins/builtins-arraybuffer.cc | 249
-rw-r--r--  chromium/v8/src/builtins/builtins-async-function-gen.cc | 4
-rw-r--r--  chromium/v8/src/builtins/builtins-async-gen.cc | 62
-rw-r--r--  chromium/v8/src/builtins/builtins-async-gen.h | 6
-rw-r--r--  chromium/v8/src/builtins/builtins-async-generator-gen.cc | 2
-rw-r--r--  chromium/v8/src/builtins/builtins-call-gen.cc | 103
-rw-r--r--  chromium/v8/src/builtins/builtins-call-gen.h | 7
-rw-r--r--  chromium/v8/src/builtins/builtins-debug-gen.cc | 22
-rw-r--r--  chromium/v8/src/builtins/builtins-definitions.h | 46
-rw-r--r--  chromium/v8/src/builtins/builtins-error.cc | 5
-rw-r--r--  chromium/v8/src/builtins/builtins-generator-gen.cc | 2
-rw-r--r--  chromium/v8/src/builtins/builtins-handler-gen.cc | 55
-rw-r--r--  chromium/v8/src/builtins/builtins-ic-gen.cc | 273
-rw-r--r--  chromium/v8/src/builtins/builtins-internal-gen.cc | 40
-rw-r--r--  chromium/v8/src/builtins/builtins-intl.cc | 43
-rw-r--r--  chromium/v8/src/builtins/builtins-lazy-gen.cc | 15
-rw-r--r--  chromium/v8/src/builtins/builtins-microtask-queue-gen.cc | 61
-rw-r--r--  chromium/v8/src/builtins/builtins-regexp-gen.cc | 31
-rw-r--r--  chromium/v8/src/builtins/builtins-trace.cc | 1
-rw-r--r--  chromium/v8/src/builtins/builtins-typed-array-gen.cc | 69
-rw-r--r--  chromium/v8/src/builtins/builtins-typed-array.cc | 3
-rw-r--r--  chromium/v8/src/builtins/builtins-utils.h | 3
-rw-r--r--  chromium/v8/src/builtins/builtins-wasm-gen.cc | 1
-rw-r--r--  chromium/v8/src/builtins/cast.tq | 24
-rw-r--r--  chromium/v8/src/builtins/constructor.tq | 11
-rw-r--r--  chromium/v8/src/builtins/conversion.tq | 21
-rw-r--r--  chromium/v8/src/builtins/ia32/builtins-ia32.cc | 65
-rw-r--r--  chromium/v8/src/builtins/ic-callable.tq | 73
-rw-r--r--  chromium/v8/src/builtins/ic.tq | 9
-rw-r--r--  chromium/v8/src/builtins/iterator.tq | 12
-rw-r--r--  chromium/v8/src/builtins/mips/builtins-mips.cc | 37
-rw-r--r--  chromium/v8/src/builtins/mips64/builtins-mips64.cc | 49
-rw-r--r--  chromium/v8/src/builtins/ppc/builtins-ppc.cc | 241
-rw-r--r--  chromium/v8/src/builtins/promise-abstract-operations.tq | 15
-rw-r--r--  chromium/v8/src/builtins/promise-all.tq | 3
-rw-r--r--  chromium/v8/src/builtins/promise-constructor.tq | 7
-rw-r--r--  chromium/v8/src/builtins/promise-jobs.tq | 3
-rw-r--r--  chromium/v8/src/builtins/promise-misc.tq | 121
-rw-r--r--  chromium/v8/src/builtins/promise-resolve.tq | 16
-rw-r--r--  chromium/v8/src/builtins/riscv64/builtins-riscv64.cc | 460
-rw-r--r--  chromium/v8/src/builtins/s390/builtins-s390.cc | 51
-rw-r--r--  chromium/v8/src/builtins/setup-builtins-internal.cc | 16
-rw-r--r--  chromium/v8/src/builtins/typed-array-createtypedarray.tq | 72
-rw-r--r--  chromium/v8/src/builtins/wasm.tq | 5
-rw-r--r--  chromium/v8/src/builtins/x64/builtins-x64.cc | 194
54 files changed, 2213 insertions, 733 deletions
diff --git a/chromium/v8/src/builtins/accessors.cc b/chromium/v8/src/builtins/accessors.cc
index c255184caeb..0285b33e1f6 100644
--- a/chromium/v8/src/builtins/accessors.cc
+++ b/chromium/v8/src/builtins/accessors.cc
@@ -113,8 +113,7 @@ void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope stats_scope(
- isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
@@ -155,8 +154,7 @@ Handle<AccessorInfo> Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) {
void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kArrayLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthGetter);
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
JSArray holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -168,8 +166,7 @@ void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kArrayLengthSetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(
@@ -206,7 +203,12 @@ void Accessors::ArrayLengthSetter(
return;
}
- JSArray::SetLength(array, length);
+ if (JSArray::SetLength(array, length).IsNothing()) {
+ // TODO(victorgomes): AccessorNameBooleanSetterCallback does not handle
+ // exceptions.
+ FATAL("Fatal JavaScript invalid array length %u", length);
+ UNREACHABLE();
+ }
uint32_t actual_new_len = 0;
CHECK(array->length().ToArrayLength(&actual_new_len));
@@ -282,8 +284,7 @@ Handle<AccessorInfo> Accessors::MakeModuleNamespaceEntryInfo(
void Accessors::StringLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kStringLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kStringLengthGetter);
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
@@ -330,8 +331,7 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
void Accessors::FunctionPrototypeGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionPrototypeGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -344,8 +344,7 @@ void Accessors::FunctionPrototypeSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionPrototypeSetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeSetter);
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
@@ -367,8 +366,7 @@ Handle<AccessorInfo> Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) {
void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -722,8 +720,7 @@ Handle<AccessorInfo> Accessors::MakeFunctionCallerInfo(Isolate* isolate) {
void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kBoundFunctionLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -749,8 +746,7 @@ Handle<AccessorInfo> Accessors::MakeBoundFunctionLengthInfo(Isolate* isolate) {
void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kBoundFunctionNameGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionNameGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
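
The repeated two-line RuntimeCallTimerScope declarations above collapse into the single-line RCS_SCOPE macro. A minimal self-contained sketch of how such a macro can be built; the stand-in class, the CONCAT helpers, and the counter value are illustrative, not V8's actual definitions:

    struct RuntimeCallTimerScope {  // stand-in for V8's RAII timer scope
      RuntimeCallTimerScope(void* /*isolate*/, int /*counter_id*/) {}
      ~RuntimeCallTimerScope() {}   // timer stops when the scope ends
    };

    #define RCS_CONCAT2(a, b) a##b
    #define RCS_CONCAT(a, b) RCS_CONCAT2(a, b)
    // Declare a line-unique local so nested scopes in one function coexist.
    #define RCS_SCOPE(isolate, counter_id) \
      RuntimeCallTimerScope RCS_CONCAT(rcs_scope_, __LINE__)(isolate, counter_id)

    void ArrayLengthGetterLike(void* isolate) {
      RCS_SCOPE(isolate, /*kArrayLengthGetter=*/1);  // one line instead of two
    }
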
diff --git a/chromium/v8/src/builtins/aggregate-error.tq b/chromium/v8/src/builtins/aggregate-error.tq
index 9c70ffcb006..c811403274d 100644
--- a/chromium/v8/src/builtins/aggregate-error.tq
+++ b/chromium/v8/src/builtins/aggregate-error.tq
@@ -19,8 +19,9 @@ transitioning javascript builtin AggregateErrorConstructor(
// [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true*
// c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_).
const message: JSAny = arguments[1];
- const obj: JSObject =
- ConstructAggregateErrorHelper(context, target, newTarget, message);
+ const options: JSAny = arguments[2];
+ const obj: JSObject = ConstructAggregateErrorHelper(
+ context, target, newTarget, message, options);
// 4. Let errorsList be ? IterableToList(errors).
const errors: JSAny = arguments[0];
@@ -38,7 +39,7 @@ transitioning javascript builtin AggregateErrorConstructor(
}
extern transitioning runtime ConstructAggregateErrorHelper(
- Context, JSFunction, JSAny, Object): JSObject;
+ Context, JSFunction, JSAny, Object, Object): JSObject;
extern transitioning runtime ConstructInternalAggregateErrorHelper(
Context, Object): JSObject;
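
The new parameter threads arguments[2], the options bag from the TC39 error-cause proposal (new AggregateError(errors, message, { cause })), through to the runtime helper so it can install a cause property on the freshly built error. A toy model of that conditional installation, using illustrative types rather than V8 internals:

    #include <optional>
    #include <string>

    struct ToyError {
      std::string message;
      std::optional<std::string> cause;  // only set when options carried one
    };

    // Models the spec step: "cause" is defined on the error only if the
    // options object actually has such a property.
    ToyError ConstructToyError(const std::string& message,
                               const std::optional<std::string>& cause) {
      ToyError error{message, std::nullopt};
      if (cause.has_value()) error.cause = cause;
      return error;
    }
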
diff --git a/chromium/v8/src/builtins/arm/builtins-arm.cc b/chromium/v8/src/builtins/arm/builtins-arm.cc
index 817d30fe26a..83252446af8 100644
--- a/chromium/v8/src/builtins/arm/builtins-arm.cc
+++ b/chromium/v8/src/builtins/arm/builtins-arm.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r0: number of arguments (untagged)
// r1: constructor function
// r3: new target
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
// Restore context from the frame.
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -236,7 +237,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r6);
// Call the function.
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- r0: constructor result
@@ -337,7 +338,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@@ -388,16 +389,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(r6, r3);
-
__ bind(&loop);
- __ sub(r6, r6, Operand(1), SetCC);
+ __ sub(r3, r3, Operand(1), SetCC);
__ b(lt, &done_loop);
- __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
+ __ add(scratch, r2, Operand(r3, LSL, kTaggedSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -799,8 +797,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
// Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1051,7 +1049,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
@@ -1124,7 +1122,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
@@ -1646,7 +1644,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1691,7 +1689,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1841,6 +1839,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ ldr(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2009,6 +2009,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2020,7 +2021,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -----------------------------------
Register scratch = r8;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
Label ok, fail;
__ AssertNotSmi(r2);
@@ -2278,7 +2279,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldrh(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r1, no_reg, r2, r0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2640,6 +2641,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2652,12 +2658,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r2: pointer to the first argument
__ mov(r5, Operand(r1));
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(r1, Operand(r2));
} else {
@@ -2669,7 +2675,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
@@ -2726,12 +2732,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ mov(pc, lr);
// Handling of exception.
@@ -2841,7 +2847,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// If we reach this code, 30 <= exponent <= 83.
// `TryInlineTruncateDoubleToI` above will have truncated any double with an
// exponent lower than 30.
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
__ Check(ge, AbortReason::kUnexpectedValue);
@@ -2957,7 +2963,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ str(r4, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ ldr(r1, MemOperand(r9, kLevelOffset));
__ cmp(r1, r6);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
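
Most of the mechanical churn in this file swaps loose enumerators (CALL_FUNCTION, kDontSaveFPRegs, OMIT_REMEMBERED_SET, kArgvInRegister) for scoped enum-class values. A minimal sketch of what the migration buys, with simplified definitions that only mirror the shape of V8's enums:

    enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };  // old style: unscoped

    enum class InvokeType { kCall, kJump };            // new style: scoped
    enum class SaveFPRegsMode { kIgnore, kSave };

    void InvokeFunctionCode(InvokeType, SaveFPRegsMode) {}  // sketch only

    void Example() {
      // Scoped enumerators must be qualified and never convert to int, so
      // swapped or mixed-up arguments now fail to compile instead of
      // silently assembling the wrong code.
      InvokeFunctionCode(InvokeType::kCall, SaveFPRegsMode::kIgnore);
    }
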
diff --git a/chromium/v8/src/builtins/arm64/builtins-arm64.cc b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
index d095d60b302..3cf3f0153fc 100644
--- a/chromium/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -99,7 +100,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label already_aligned;
Register argc = x0;
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
@@ -176,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
// Call the function.
- __ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(x1, x3, argc, InvokeType::kCall);
// Restore the context from the frame.
__ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -219,7 +220,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterFrame(StackFrame::CONSTRUCT);
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
@@ -336,7 +337,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
__ Mov(x0, x12);
- __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- sp[0*kSystemPointerSize]: implicit receiver
@@ -442,7 +443,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -639,7 +640,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Initialize the pointer cage base register.
- __ Mov(kPointerCageBaseRegister, x0);
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
@@ -925,7 +927,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Don't initialize the reserved registers.
// x26 : root register (kRootRegister).
// x27 : context pointer (cp).
- // x28 : pointer cage base register (kPointerCageBaseRegister).
+ // x28 : pointer cage base register (kPtrComprCageBaseRegister).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -966,8 +968,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -998,7 +1000,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Tst(params_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
@@ -1230,7 +1232,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
@@ -1288,7 +1290,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
@@ -1859,7 +1861,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Br(x17);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1903,7 +1905,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -2087,6 +2089,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ ldr(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2385,6 +2389,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
} // namespace
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2394,7 +2399,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
@@ -2618,7 +2623,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ldrh(x2,
FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(x1, no_reg, x2, x0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ Bind(&class_constructor);
@@ -3036,6 +3041,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -3053,7 +3063,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Register parameters:
// x0: argc (including receiver, untagged)
// x1: target
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// x11: argv (pointer to first argument)
//
// The stack on entry holds the arguments and the receiver, with the receiver
@@ -3085,7 +3095,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// (arg[argc-2]), or just below the receiver in case there are no arguments.
// - Adjust for the arg[] array.
Register temp_argv = x11;
- if (argv_mode == kArgvOnStack) {
+ if (argv_mode == ArgvMode::kStack) {
__ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
__ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize);
@@ -3096,7 +3106,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, x10, extra_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
@@ -3177,8 +3187,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Peek(argc, 2 * kSystemPointerSize);
__ Peek(target, 3 * kSystemPointerSize);
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
- if (argv_mode == kArgvOnStack) {
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9);
+ if (argv_mode == ArgvMode::kStack) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
}
@@ -3247,7 +3257,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Compute the handler entry address and jump to it. We use x17 here for the
// jump target, as this jump can occasionally end up at the start of
- // InterpreterEnterBytecodeDispatch, which when CFI is enabled starts with
+ // InterpreterEnterAtBytecode, which when CFI is enabled starts with
// a "BTI c".
UseScratchRegisterScope temps(masm);
temps.Exclude(x17);
@@ -3296,7 +3306,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// signed overflow in the int64_t target. Since we've already handled
// exponents >= 84, we can guarantee that 63 <= exponent < 84.
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(exponent, HeapNumber::kExponentBias + 63);
// Exponents less than this should have been handled by the Fcvt case.
__ Check(ge, AbortReason::kUnexpectedValue);
@@ -3412,7 +3422,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
__ Cmp(w1, level_reg);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
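
Besides the same enum-class churn as the arm port, this file reloads the pointer-compression cage base root-relative from IsolateData instead of copying x0, matching the kPtrComprCageBaseRegister rename. The dedicated register exists because, under pointer compression, a tagged heap pointer is stored as a 32-bit offset and decompression is one add against the cage base. A toy model of that arithmetic, ignoring Smi sign-extension details:

    #include <cstdint>

    // Model only: decompress a 32-bit tagged offset against the cage base
    // that x28 keeps resident for every compressed-field load.
    uintptr_t DecompressTagged(uintptr_t cage_base, uint32_t compressed) {
      return cage_base + compressed;
    }
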
diff --git a/chromium/v8/src/builtins/arraybuffer.tq b/chromium/v8/src/builtins/arraybuffer.tq
index 179c4b38fd2..5794414443b 100644
--- a/chromium/v8/src/builtins/arraybuffer.tq
+++ b/chromium/v8/src/builtins/arraybuffer.tq
@@ -9,21 +9,25 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get ArrayBuffer.prototype.byteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get ArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
// 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get ArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsDetachedBuffer(O) is true, throw a TypeError exception.
+ // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception.
// TODO(v8:4895): We don't actually throw here.
- // 5. Let length be O.[[ArrayBufferByteLength]].
+ // 6. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
- // 6. Return length.
+ // 7. Return length.
return Convert<Number>(length);
}
@@ -32,15 +36,43 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.byteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get SharedArrayBuffer.prototype.byteLength', receiver);
- // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ // 3. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get SharedArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 5. Let length be O.[[ArrayBufferByteLength]].
+ const length = o.byte_length;
+ // 6. Return length.
+ return Convert<Number>(length);
+}
+
+// #sec-get-resizablearraybuffer.prototype.bytelength
+transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get ResizableArrayBuffer.prototype.byteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
// 4. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
@@ -48,6 +80,55 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
return Convert<Number>(length);
}
+// #sec-get-resizablearraybuffer.prototype.maxbytelength
+transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. Let length be O.[[ArrayBufferMaxByteLength]].
+ const length = o.max_byte_length;
+ // 5. Return length.
+ return Convert<Number>(length);
+}
+
+// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
+transitioning javascript builtin
+GrowableSharedArrayBufferPrototypeGetMaxByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ if (!IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. Let length be O.[[ArrayBufferMaxByteLength]].
+ const length = o.max_byte_length;
+ // 5. Return length.
+ return Convert<Number>(length);
+}
+
// #sec-arraybuffer.isview
transitioning javascript builtin ArrayBufferIsView(arg: JSAny): Boolean {
// 1. If Type(arg) is not Object, return false.
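
All four getters above run the same three-step guard: brand-check the receiver as a JSArrayBuffer, then require the expected resizable flag, then the expected shared flag, throwing kIncompatibleMethodReceiver on any mismatch. A compact model of that guard matrix, with illustrative types and std::nullopt standing in for the thrown TypeError:

    #include <cstddef>
    #include <optional>

    struct BufferModel {
      bool is_shared;
      bool is_resizable;
      size_t byte_length;
    };

    std::optional<size_t> ByteLengthGuarded(const BufferModel* receiver,
                                            bool want_shared,
                                            bool want_resizable) {
      if (receiver == nullptr) return std::nullopt;  // not an ArrayBuffer
      if (receiver->is_resizable != want_resizable) return std::nullopt;
      if (receiver->is_shared != want_shared) return std::nullopt;
      return receiver->byte_length;
    }
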
diff --git a/chromium/v8/src/builtins/base.tq b/chromium/v8/src/builtins/base.tq
index 08639c04daf..fc84e1a2ce4 100644
--- a/chromium/v8/src/builtins/base.tq
+++ b/chromium/v8/src/builtins/base.tq
@@ -141,6 +141,7 @@ intrinsic %MakeLazy<T: type, A1: type, A2: type, A3: type>(
// template, but Torque doesn't understand how to use templates for extern
// macros, so just add whatever overload definitions you need here.
extern macro RunLazy(Lazy<Smi>): Smi;
+extern macro RunLazy(Lazy<JSAny>): JSAny;
// A Smi value containing a bitfield struct as its integer data.
@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
@@ -262,6 +263,8 @@ extern enum UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback }
extern operator '==' macro UpdateFeedbackModeEqual(
constexpr UpdateFeedbackMode, constexpr UpdateFeedbackMode): constexpr bool;
+extern enum CallFeedbackContent extends int32 { kTarget, kReceiver }
+
extern enum UnicodeEncoding { UTF16, UTF32 }
// Promise constants
@@ -961,6 +964,8 @@ extern operator '|' macro ConstexprWord32Or(
constexpr int32, constexpr int32): constexpr int32;
extern operator '^' macro Word32Xor(int32, int32): int32;
extern operator '^' macro Word32Xor(uint32, uint32): uint32;
+extern operator '<<' macro ConstexprWord32Shl(
+ constexpr uint32, constexpr int32): uint32;
extern operator '==' macro Word64Equal(int64, int64): bool;
extern operator '==' macro Word64Equal(uint64, uint64): bool;
@@ -1296,6 +1301,9 @@ macro GetFastAliasedArgumentsMap(implicit context: Context)(): Map {
macro GetWeakCellMap(implicit context: Context)(): Map {
return %GetClassMapConstant<WeakCell>();
}
+macro GetPrototypeApplyFunction(implicit context: Context)(): JSFunction {
+ return *NativeContextSlot(ContextSlot::FUNCTION_PROTOTYPE_APPLY_INDEX);
+}
// Call(Context, Target, Receiver, ...Args)
// TODO(joshualitt): Assuming the context parameter is for throwing when Target
@@ -1689,7 +1697,7 @@ extern transitioning runtime SetOwnPropertyIgnoreAttributes(
namespace runtime {
extern runtime
-GetDerivedMap(Context, JSFunction, JSReceiver): Map;
+GetDerivedMap(Context, JSFunction, JSReceiver, JSAny): Map;
}
extern macro IsDeprecatedMap(Map): bool;
diff --git a/chromium/v8/src/builtins/builtins-api.cc b/chromium/v8/src/builtins/builtins-api.cc
index 35e6cc393cb..b39bfc84a55 100644
--- a/chromium/v8/src/builtins/builtins-api.cc
+++ b/chromium/v8/src/builtins/builtins-api.cc
@@ -23,8 +23,7 @@ namespace {
// TODO(dcarney): CallOptimization duplicates this logic, merge.
JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
JSReceiver receiver) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kGetCompatibleReceiver);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kGetCompatibleReceiver);
Object recv_type = info.signature();
// No signature, return holder.
if (!recv_type.IsFunctionTemplateInfo()) return receiver;
@@ -171,8 +170,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
Handle<Object> receiver,
int argc, Handle<Object> args[],
Handle<HeapObject> new_target) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInvokeApiFunction);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kInvokeApiFunction);
DCHECK(function->IsFunctionTemplateInfo() ||
(function->IsJSFunction() &&
JSFunction::cast(*function).shared().IsApiFunction()));
diff --git a/chromium/v8/src/builtins/builtins-array-gen.cc b/chromium/v8/src/builtins/builtins-array-gen.cc
index 6b522fda6c0..833627c7b41 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-array-gen.cc
@@ -10,6 +10,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/execution/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/objects/allocation-site-inl.h"
diff --git a/chromium/v8/src/builtins/builtins-array.cc b/chromium/v8/src/builtins/builtins-array.cc
index d3bbd980a55..6fe1bfc712f 100644
--- a/chromium/v8/src/builtins/builtins-array.cc
+++ b/chromium/v8/src/builtins/builtins-array.cc
@@ -173,7 +173,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (!JSArray::HasReadOnlyLength(array)) {
DCHECK_LE(length, kMaxUInt32);
- JSArray::SetLength(array, static_cast<uint32_t>(length));
+ MAYBE_RETURN_NULL(
+ JSArray::SetLength(array, static_cast<uint32_t>(length)));
return receiver;
}
}
@@ -207,16 +208,16 @@ V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate,
return *receiver;
}
-V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
+V8_WARN_UNUSED_RESULT Maybe<bool> TryFastArrayFill(
Isolate* isolate, BuiltinArguments* args, Handle<JSReceiver> receiver,
Handle<Object> value, double start_index, double end_index) {
// If indices are too large, use generic path since they are stored as
// properties, not in the element backing store.
- if (end_index > kMaxUInt32) return false;
- if (!receiver->IsJSObject()) return false;
+ if (end_index > kMaxUInt32) return Just(false);
+ if (!receiver->IsJSObject()) return Just(false);
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, args, 1, 1)) {
- return false;
+ return Just(false);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -240,14 +241,14 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
CHECK(DoubleToUint32IfEqualToSelf(end_index, &end));
ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->Fill(array, value, start, end);
- return true;
+ RETURN_ON_EXCEPTION_VALUE(isolate, accessor->Fill(array, value, start, end),
+ Nothing<bool>());
+ return Just(true);
}
} // namespace
BUILTIN(ArrayPrototypeFill) {
HandleScope scope(isolate);
-
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
if (!isolate->debug()->PerformSideEffectCheckForObject(args.receiver())) {
return ReadOnlyRoots(isolate).exception();
@@ -292,10 +293,12 @@ BUILTIN(ArrayPrototypeFill) {
Handle<Object> value = args.atOrUndefined(isolate, 1);
- if (TryFastArrayFill(isolate, &args, receiver, value, start_index,
- end_index)) {
- return *receiver;
- }
+ bool success;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, success,
+ TryFastArrayFill(isolate, &args, receiver, value, start_index,
+ end_index));
+ if (success) return *receiver;
return GenericArrayFill(isolate, receiver, value, start_index, end_index);
}
@@ -385,7 +388,9 @@ BUILTIN(ArrayPush) {
}
ElementsAccessor* accessor = array->GetElementsAccessor();
- uint32_t new_length = accessor->Push(array, &args, to_add);
+ uint32_t new_length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_length, accessor->Push(array, &args, to_add));
return *isolate->factory()->NewNumberFromUint((new_length));
}
@@ -468,7 +473,8 @@ BUILTIN(ArrayPop) {
Handle<Object> result;
if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
// Fast Elements Path
- result = array->GetElementsAccessor()->Pop(array);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, array->GetElementsAccessor()->Pop(array));
} else {
// Use Slow Lookup otherwise
uint32_t new_length = len - 1;
@@ -483,7 +489,9 @@ BUILTIN(ArrayPop) {
isolate->factory()->length_string(),
Object::TypeOf(isolate, array), array));
}
- JSArray::SetLength(array, new_length);
+ bool set_len_ok;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, set_len_ok, JSArray::SetLength(array, new_length));
}
return *result;
@@ -595,7 +603,8 @@ BUILTIN(ArrayShift) {
if (CanUseFastArrayShift(isolate, receiver)) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- return *array->GetElementsAccessor()->Shift(array);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ array->GetElementsAccessor()->Shift(array));
}
return GenericArrayShift(isolate, receiver, length);
@@ -623,7 +632,9 @@ BUILTIN(ArrayUnshift) {
DCHECK(!JSArray::HasReadOnlyLength(array));
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Unshift(array, &args, to_add);
+ uint32_t new_length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_length, accessor->Unshift(array, &args, to_add));
return Smi::FromInt(new_length);
}
@@ -742,7 +753,7 @@ class ArrayConcatVisitor {
array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_length(*length);
array->set_elements(*storage_fixed_array());
- array->synchronized_set_map(*map);
+ array->set_map(*map, kReleaseStore);
return array;
}
@@ -880,9 +891,11 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
// External arrays are always dense.
return length;
+
+#undef TYPED_ARRAY_CASE
case NO_ELEMENTS:
return 0;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -956,9 +969,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) {
size_t length = Handle<JSTypedArray>::cast(object)->length();
if (range <= length) {
length = range;
@@ -974,6 +985,11 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
if (length == range) return; // All indices accounted for already.
break;
}
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ // TODO(v8:11111): Support RAB / GSAB.
+ UNREACHABLE();
+
+#undef TYPED_ARRAY_CASE
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
DisallowGarbageCollection no_gc;
@@ -1199,8 +1215,11 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
break;
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
return IterateElementsSlow(isolate, receiver, length, visitor);
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ // TODO(v8:11111): Support RAB / GSAB.
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
// |array| is guaranteed to be an array or typed array.
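
The recurring edit in this file converts elements-accessor entry points (Fill, Push, Pop, Shift, Unshift, SetLength) from plain return values to Maybe-style results, so an exception raised inside the accessor propagates instead of being dropped. A reduced sketch of the unwrap-or-bail pattern that MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION packages, with std::optional standing in for v8::Maybe:

    #include <optional>

    // Empty result means "an exception is pending"; otherwise the payload
    // is the new array length.
    std::optional<unsigned> PushLike(bool will_throw) {
      if (will_throw) return std::nullopt;
      return 42u;
    }

    bool CallerLike() {
      auto maybe_len = PushLike(false);
      if (!maybe_len.has_value()) return false;  // propagate the failure
      return *maybe_len > 0;                     // use the unwrapped value
    }
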
diff --git a/chromium/v8/src/builtins/builtins-arraybuffer.cc b/chromium/v8/src/builtins/builtins-arraybuffer.cc
index 0f5f9051861..2d07847d570 100644
--- a/chromium/v8/src/builtins/builtins-arraybuffer.cc
+++ b/chromium/v8/src/builtins/builtins-arraybuffer.cc
@@ -23,17 +23,43 @@ namespace internal {
name)); \
}
+#define CHECK_RESIZABLE(expected, name, method) \
+ if (name->is_resizable() != expected) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
+ isolate->factory()->NewStringFromAsciiChecked(method), \
+ name)); \
+ }
+
// -----------------------------------------------------------------------------
// ES#sec-arraybuffer-objects
namespace {
+bool RoundUpToPageSize(size_t byte_length, size_t page_size,
+ size_t max_allowed_byte_length, size_t* pages) {
+ size_t bytes_wanted = RoundUp(byte_length, page_size);
+ if (bytes_wanted > max_allowed_byte_length) {
+ return false;
+ }
+ *pages = bytes_wanted / page_size;
+ return true;
+}
+
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
- InitializedFlag initialized) {
- SharedFlag shared = (*target != target->native_context().array_buffer_fun())
- ? SharedFlag::kShared
- : SharedFlag::kNotShared;
+ Handle<Object> max_length, InitializedFlag initialized) {
+ SharedFlag shared =
+ (*target != target->native_context().array_buffer_fun() &&
+ *target != target->native_context().resizable_array_buffer_fun())
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+ ResizableFlag resizable =
+ (*target == target->native_context().resizable_array_buffer_fun() ||
+ *target == target->native_context().growable_shared_array_buffer_fun())
+ ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable;
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -42,9 +68,10 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
// Ensure that all fields are initialized because BackingStore::Allocate is
// allowed to GC. Note that we cannot move the allocation of the ArrayBuffer
// after BackingStore::Allocate because of the spec.
- array_buffer->Setup(shared, nullptr);
+ array_buffer->Setup(shared, resizable, nullptr);
size_t byte_length;
+ size_t max_byte_length = 0;
if (!TryNumberToSize(*length, &byte_length) ||
byte_length > JSArrayBuffer::kMaxByteLength) {
// ToNumber failed.
@@ -52,8 +79,46 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- auto backing_store =
- BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ std::unique_ptr<BackingStore> backing_store;
+ if (resizable == ResizableFlag::kNotResizable) {
+ backing_store =
+ BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ } else {
+ Handle<Object> number_max_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length,
+ Object::ToInteger(isolate, max_length));
+
+ if (!TryNumberToSize(*number_max_length, &max_byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+ if (byte_length > max_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+
+ size_t page_size = AllocatePageSize();
+ size_t initial_pages;
+ if (!RoundUpToPageSize(byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &initial_pages)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+
+ size_t max_pages;
+ if (!RoundUpToPageSize(max_byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &max_pages)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+ constexpr bool kIsWasmMemory = false;
+ backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
+ isolate, byte_length, page_size, initial_pages, max_pages,
+ kIsWasmMemory, shared);
+ }
if (!backing_store) {
// Allocation of backing store failed.
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -61,6 +126,7 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
}
array_buffer->Attach(std::move(backing_store));
+ array_buffer->set_max_byte_length(max_byte_length);
return *array_buffer;
}
@@ -71,7 +137,10 @@ BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
DCHECK(*target == target->native_context().array_buffer_fun() ||
- *target == target->native_context().shared_array_buffer_fun());
+ *target == target->native_context().shared_array_buffer_fun() ||
+ *target == target->native_context().resizable_array_buffer_fun() ||
+ *target ==
+ target->native_context().growable_shared_array_buffer_fun());
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -87,10 +156,11 @@ BUILTIN(ArrayBufferConstructor) {
if (number_length->Number() < 0.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
+ }
- return ConstructBuffer(isolate, target, new_target, number_length,
- InitializedFlag::kZeroInitialized);
+ Handle<Object> max_length = args.atOrUndefined(isolate, 2);
+ return ConstructBuffer(isolate, target, new_target, number_length, max_length,
+ InitializedFlag::kZeroInitialized);
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
@@ -101,7 +171,7 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
isolate);
Handle<Object> length = args.atOrUndefined(isolate, 1);
- return ConstructBuffer(isolate, target, target, length,
+ return ConstructBuffer(isolate, target, target, length, Handle<Object>(),
InitializedFlag::kUninitialized);
}
@@ -119,6 +189,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [SAB] If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(is_shared, array_buffer, kMethodName);
+ CHECK_RESIZABLE(false, array_buffer, kMethodName);
+
// * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -280,5 +352,158 @@ BUILTIN(ArrayBufferPrototypeSlice) {
return SliceHelper(args, isolate, kMethodName, false);
}
+static Object ResizeHelper(BuiltinArguments args, Isolate* isolate,
+ const char* kMethodName, bool is_shared) {
+ HandleScope scope(isolate);
+
+ // 1 Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ CHECK_RESIZABLE(true, array_buffer, kMethodName);
+
+ // [RAB] 3. If IsSharedArrayBuffer(O) is true, throw a *TypeError* exception
+ // [GSAB] 3. If IsSharedArrayBuffer(O) is false, throw a *TypeError* exception
+ CHECK_SHARED(is_shared, array_buffer, kMethodName);
+
+  // Let newByteLength be ? ToIntegerOrInfinity(newLength).
+ Handle<Object> new_length = args.at(1);
+ Handle<Object> number_new_byte_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_new_byte_length,
+ Object::ToInteger(isolate, new_length));
+
+ // [RAB] If IsDetachedBuffer(O) is true, throw a TypeError exception.
+ if (!is_shared && array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ // [RAB] If newByteLength < 0 or newByteLength >
+ // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception.
+
+ // [GSAB] If newByteLength < currentByteLength or newByteLength >
+ // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception.
+ size_t new_byte_length;
+ if (!TryNumberToSize(*number_new_byte_length, &new_byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ if (is_shared && new_byte_length < array_buffer->byte_length()) {
+ // GrowableSharedArrayBuffer is only allowed to grow.
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ if (new_byte_length > array_buffer->max_byte_length()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ size_t page_size = AllocatePageSize();
+ size_t new_committed_pages;
+ bool round_return_value =
+ RoundUpToPageSize(new_byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &new_committed_pages);
+ CHECK(round_return_value);
+
+ // [RAB] Let hostHandled be ? HostResizeArrayBuffer(O, newByteLength).
+ // [GSAB] Let hostHandled be ? HostGrowArrayBuffer(O, newByteLength).
+ // If hostHandled is handled, return undefined.
+
+ // TODO(v8:11111): Wasm integration.
+
+ if (!is_shared) {
+ // [RAB] Let oldBlock be O.[[ArrayBufferData]].
+ // [RAB] Let newBlock be ? CreateByteDataBlock(newByteLength).
+ // [RAB] Let copyLength be min(newByteLength, O.[[ArrayBufferByteLength]]).
+ // [RAB] Perform CopyDataBlockBytes(newBlock, 0, oldBlock, 0, copyLength).
+ // [RAB] NOTE: Neither creation of the new Data Block nor copying from the
+ // old Data Block are observable. Implementations reserve the right to
+ // implement this method as in-place growth or shrinkage.
+ if (array_buffer->GetBackingStore()->ResizeInPlace(
+ isolate, new_byte_length, new_committed_pages * page_size) !=
+ BackingStore::ResizeOrGrowResult::kSuccess) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kOutOfMemory,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+ // [RAB] Set O.[[ArrayBufferByteLength]] to newLength.
+ array_buffer->set_byte_length(new_byte_length);
+ } else {
+ // [GSAB] (Detailed description of the algorithm omitted.)
+ auto result = array_buffer->GetBackingStore()->GrowInPlace(
+ isolate, new_byte_length, new_committed_pages * page_size);
+ if (result == BackingStore::ResizeOrGrowResult::kFailure) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kOutOfMemory,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+ if (result == BackingStore::ResizeOrGrowResult::kRace) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+ }
+ // Invariant: byte_length for a GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ CHECK_EQ(0, array_buffer->byte_length());
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+// ES #sec-get-growablesharedarraybuffer.prototype.bytelength
+// get GrowableSharedArrayBuffer.prototype.byteLength
+BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) {
+ const char* const kMethodName =
+ "get GrowableSharedArrayBuffer.prototype.byteLength";
+ HandleScope scope(isolate);
+
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxLength]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ CHECK_RESIZABLE(true, array_buffer, kMethodName);
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ CHECK_SHARED(true, array_buffer, kMethodName);
+
+ // 4. Let length be ArrayBufferByteLength(O, SeqCst).
+
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, array_buffer->byte_length());
+
+ size_t byte_length =
+ array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+
+ // 5. Return length.
+ return *isolate->factory()->NewNumberFromSize(byte_length);
+}
+
+// ES #sec-resizablearraybuffer.prototype.resize
+// ResizableArrayBuffer.prototype.resize(new_size)
+BUILTIN(ResizableArrayBufferPrototypeResize) {
+ const char* const kMethodName = "ResizableArrayBuffer.prototype.resize";
+ constexpr bool kIsShared = false;
+ return ResizeHelper(args, isolate, kMethodName, kIsShared);
+}
+
+// ES #sec-growablesharedarraybuffer.prototype.grow
+// GrowableSharedArrayBuffer.prototype.grow(new_size)
+BUILTIN(GrowableSharedArrayBufferPrototypeGrow) {
+ const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow";
+ constexpr bool kIsShared = true;
+ return ResizeHelper(args, isolate, kMethodName, kIsShared);
+}
+
} // namespace internal
} // namespace v8
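The resize path above never commits partial pages: the requested byte length is rounded up to whole OS pages before the backing store is resized, and the CHECK records that the length was already validated against the maximum. A standalone sketch of that rounding, with an assumed helper name and the (true in practice) assumption that the page size is a power of two:

#include <cstddef>

// Round byte_length up to a whole number of pages; fail if the rounded
// value would exceed max_byte_length. Assumes page_size is a power of
// two, as OS page sizes are.
bool RoundUpToPageSizeSketch(std::size_t byte_length, std::size_t page_size,
                             std::size_t max_byte_length, std::size_t* pages) {
  std::size_t rounded = (byte_length + page_size - 1) & ~(page_size - 1);
  // The first test catches unsigned wraparound in the addition above.
  if (rounded < byte_length || rounded > max_byte_length) return false;
  *pages = rounded / page_size;
  return true;
}

With 4 KiB pages, a resize(5) request commits one full page; the builtin then stores the exact byte length (5) on the JSArrayBuffer while the backing store keeps whole pages committed.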
diff --git a/chromium/v8/src/builtins/builtins-async-function-gen.cc b/chromium/v8/src/builtins/builtins-async-function-gen.cc
index 49b00caa048..1644997ed01 100644
--- a/chromium/v8/src/builtins/builtins-async-function-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-function-gen.cc
@@ -157,12 +157,14 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
async_function_object, JSAsyncFunctionObject::kPromiseOffset, promise);
+ RunContextPromiseHookInit(context, promise, UndefinedConstant());
+
// Fire promise hooks if enabled and push the Promise under construction
// in an async function on the catch prediction stack to handle exceptions
// thrown before the first await.
Label if_instrumentation(this, Label::kDeferred),
if_instrumentation_done(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_instrumentation, &if_instrumentation_done);
BIND(&if_instrumentation);
{
diff --git a/chromium/v8/src/builtins/builtins-async-gen.cc b/chromium/v8/src/builtins/builtins-async-gen.cc
index 9ee6037b2bd..629f1e94fa4 100644
--- a/chromium/v8/src/builtins/builtins-async-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-gen.cc
@@ -97,18 +97,11 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
- // Deal with PromiseHooks and debug support in the runtime. This
- // also allocates the throwaway promise, which is only needed in
- // case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred), do_resolve_promise(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
- &if_debugging, &do_resolve_promise);
- BIND(&if_debugging);
- var_throwaway =
- CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value, promise,
- outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_resolve_promise);
- BIND(&do_resolve_promise);
+ RunContextPromiseHookInit(context, promise, outer_promise);
+
+ InitAwaitPromise(Runtime::kAwaitPromisesInitOld, context, value, promise,
+ outer_promise, on_reject, is_predicted_as_caught,
+ &var_throwaway);
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
CallBuiltin(Builtins::kResolvePromise, context, promise, value);
@@ -168,21 +161,46 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
+ InitAwaitPromise(Runtime::kAwaitPromisesInit, context, promise, promise,
+ outer_promise, on_reject, is_predicted_as_caught,
+ &var_throwaway);
+
+ return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
+ on_resolve, on_reject, var_throwaway.value());
+}
+
+void AsyncBuiltinsAssembler::InitAwaitPromise(
+ Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
+ TNode<Object> promise, TNode<Object> outer_promise,
+ TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
+ TVariable<HeapObject>* var_throwaway) {
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
// case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred), do_perform_promise_then(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
- &if_debugging, &do_perform_promise_then);
+ Label if_debugging(this, Label::kDeferred),
+ if_promise_hook(this, Label::kDeferred),
+ not_debugging(this),
+ do_nothing(this);
+ TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags), &if_debugging, &not_debugging);
BIND(&if_debugging);
- var_throwaway =
- CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise,
+ *var_throwaway =
+ CAST(CallRuntime(id, context, value, promise,
outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
-
- return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
- on_resolve, on_reject, var_throwaway.value());
+ Goto(&do_nothing);
+ BIND(&not_debugging);
+
+ // This call to NewJSPromise is to keep behaviour parity with what happens
+ // in Runtime::kAwaitPromisesInit above if native hooks are set. It will
+ // create a throwaway promise that will trigger an init event and will get
+ // passed into Builtins::kPerformPromiseThen below.
+ Branch(IsContextPromiseHookEnabled(promiseHookFlags), &if_promise_hook,
+ &do_nothing);
+ BIND(&if_promise_hook);
+ *var_throwaway = NewJSPromise(context, promise);
+ Goto(&do_nothing);
+ BIND(&do_nothing);
}
TNode<Object> AsyncBuiltinsAssembler::Await(
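The refactored await path only materializes the throwaway promise when something can observe it: the runtime call allocates it when isolate-level hooks or the debugger are active, NewJSPromise allocates it when only a per-context hook needs its init event, and it stays undefined otherwise. A minimal C++ sketch of that three-way decision; the flag names are illustrative, not V8's actual bit layout:

#include <cstdint>
#include <optional>
#include <string>

enum : uint32_t {
  kIsolateHookOrDebug = 1u << 0,  // isolate hooks, debugger, async events
  kContextHook = 1u << 1,         // per-context promise hook
};

std::optional<std::string> MaybeThrowaway(uint32_t flags) {
  if (flags & kIsolateHookOrDebug)
    return "throwaway from Runtime::kAwaitPromisesInit*";
  if (flags & kContextHook)
    return "throwaway from NewJSPromise (fires the init event)";
  return std::nullopt;  // nobody can observe it, so skip the allocation
}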
diff --git a/chromium/v8/src/builtins/builtins-async-gen.h b/chromium/v8/src/builtins/builtins-async-gen.h
index 833e78d45d5..34b7a0ce1d6 100644
--- a/chromium/v8/src/builtins/builtins-async-gen.h
+++ b/chromium/v8/src/builtins/builtins-async-gen.h
@@ -62,6 +62,12 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
TNode<SharedFunctionInfo> on_resolve_sfi,
TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
+
+ void InitAwaitPromise(
+ Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
+ TNode<Object> promise, TNode<Object> outer_promise,
+ TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
+ TVariable<HeapObject>* var_throwaway);
};
} // namespace internal
diff --git a/chromium/v8/src/builtins/builtins-async-generator-gen.cc b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
index 03df9e307c7..0e94fd20939 100644
--- a/chromium/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
@@ -518,7 +518,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
// the "promiseResolve" hook would not be fired otherwise.
Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
GotoIfForceSlowPath(&if_slow);
- GotoIf(IsPromiseHookEnabled(), &if_slow);
+ GotoIf(IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(), &if_slow);
Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
BIND(&if_fast);
diff --git a/chromium/v8/src/builtins/builtins-call-gen.cc b/chromium/v8/src/builtins/builtins-call-gen.cc
index 664f57aadb2..89bf77d0b07 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.cc
+++ b/chromium/v8/src/builtins/builtins-call-gen.cc
@@ -64,38 +64,45 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
masm->isolate()->builtins()->CallFunction());
}
+// TODO(cbruni): Try reusing code between builtin versions to avoid binary
+// overhead.
+TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ auto receiver = UndefinedConstant();
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNullOrUndefined, receiver);
+}
+
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline,
CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
- argc);
+ auto receiver = UndefinedConstant();
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNullOrUndefined, argc,
+ slot, receiver);
+}
+
+TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNotNullOrUndefined);
}
TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline,
CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
- argc);
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNotNullOrUndefined, argc,
+ slot);
+}
+
+TF_BUILTIN(Call_ReceiverIsAny_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsAny);
}
TF_BUILTIN(Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsAny, argc, slot);
}
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
@@ -105,7 +112,9 @@ TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
argc);
}
@@ -117,7 +126,9 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
argc);
}
@@ -128,7 +139,9 @@ TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
}
@@ -449,6 +462,43 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
}
}
+template <class Descriptor>
+void CallOrConstructBuiltinsAssembler::CallReceiver(
+ Builtins::Name id, base::Optional<TNode<Object>> receiver) {
+ static_assert(std::is_same<Descriptor,
+ CallTrampoline_Baseline_CompactDescriptor>::value,
+ "Incompatible Descriptor");
+ auto bitfield = UncheckedParameter<Word32T>(Descriptor::kBitField);
+ TNode<Int32T> argc =
+ Signed(DecodeWord32<
+ CallTrampoline_Baseline_CompactDescriptor::ArgumentCountField>(
+ bitfield));
+ TNode<UintPtrT> slot = ChangeUint32ToWord(
+ DecodeWord32<CallTrampoline_Baseline_CompactDescriptor::SlotField>(
+ bitfield));
+ CallReceiver<Descriptor>(id, argc, slot, receiver);
+}
+
+template <class Descriptor>
+void CallOrConstructBuiltinsAssembler::CallReceiver(
+ Builtins::Name id, TNode<Int32T> argc, TNode<UintPtrT> slot,
+ base::Optional<TNode<Object>> maybe_receiver) {
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ LazyNode<Object> receiver = [=] {
+ if (maybe_receiver) {
+ return *maybe_receiver;
+ } else {
+ CodeStubArguments args(this, argc);
+ return args.GetReceiver();
+ }
+ };
+
+ CollectCallFeedback(target, receiver, context, feedback_vector, slot);
+ TailCallBuiltin(id, context, target, argc);
+}
+
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
@@ -464,7 +514,9 @@ TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
@@ -485,7 +537,10 @@ TF_BUILTIN(CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
auto context = LoadContextFromBaseline();
auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ CodeStubArguments args(this, args_count);
+ CollectCallFeedback(
+ target, [=] { return args.GetReceiver(); }, context, feedback_vector,
+ slot);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
@@ -497,7 +552,9 @@ TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
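The new _Compact baseline variants receive the argument count and the feedback slot packed into one 32-bit word (Descriptor::kBitField) and decode them with ArgumentCountField and SlotField, saving a register at every compact call site. A self-contained sketch of that packing; the 8/24 bit split is an assumption for illustration, not V8's actual field widths:

#include <cassert>
#include <cstdint>

constexpr unsigned kArgcBits = 8;  // assumed split for illustration

uint32_t Encode(uint32_t argc, uint32_t slot) {
  assert(argc < (1u << kArgcBits) && slot < (1u << (32 - kArgcBits)));
  return argc | (slot << kArgcBits);
}
uint32_t DecodeArgc(uint32_t bits) { return bits & ((1u << kArgcBits) - 1); }
uint32_t DecodeSlot(uint32_t bits) { return bits >> kArgcBits; }

Call sites whose argc and slot fit the narrow fields use the compact builtin; the non-compact variant remains for the rest, which is why both descriptors are registered side by side in builtins-definitions.h.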
diff --git a/chromium/v8/src/builtins/builtins-call-gen.h b/chromium/v8/src/builtins/builtins-call-gen.h
index c938662d5e5..ff4d998ff3a 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.h
+++ b/chromium/v8/src/builtins/builtins-call-gen.h
@@ -30,6 +30,13 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> spread, TNode<Int32T> args_count,
TNode<Context> context);
+ template <class Descriptor>
+ void CallReceiver(Builtins::Name id,
+ base::Optional<TNode<Object>> = base::nullopt);
+ template <class Descriptor>
+ void CallReceiver(Builtins::Name id, TNode<Int32T> argc, TNode<UintPtrT> slot,
+ base::Optional<TNode<Object>> = base::nullopt);
+
enum class CallFunctionTemplateMode : uint8_t {
kCheckAccess,
kCheckCompatibleReceiver,
diff --git a/chromium/v8/src/builtins/builtins-debug-gen.cc b/chromium/v8/src/builtins/builtins-debug-gen.cc
deleted file mode 100644
index 9d47cf16006..00000000000
--- a/chromium/v8/src/builtins/builtins-debug-gen.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/debug/debug.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void Builtins::Generate_FrameDropperTrampoline(MacroAssembler* masm) {
- DebugCodegen::GenerateFrameDropperTrampoline(masm);
-}
-
-void Builtins::Generate_HandleDebuggerStatement(MacroAssembler* masm) {
- DebugCodegen::GenerateHandleDebuggerStatement(masm);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/builtins/builtins-definitions.h b/chromium/v8/src/builtins/builtins-definitions.h
index b0e608418eb..78255a30e93 100644
--- a/chromium/v8/src/builtins/builtins-definitions.h
+++ b/chromium/v8/src/builtins/builtins-definitions.h
@@ -50,8 +50,13 @@ namespace internal {
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsAny, CallTrampoline) \
+ TFC(Call_ReceiverIsNullOrUndefined_Baseline_Compact, \
+ CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact, \
+ CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsNotNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsAny_Baseline_Compact, CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsAny_Baseline, CallTrampoline_Baseline) \
TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \
CallTrampoline_WithFeedback) \
@@ -133,13 +138,13 @@ namespace internal {
InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread, \
InterpreterPushArgsThenConstruct) \
- ASM(InterpreterEnterBytecodeAdvance, Dummy) \
- ASM(InterpreterEnterBytecodeDispatch, Dummy) \
+ ASM(InterpreterEnterAtBytecode, Dummy) \
+ ASM(InterpreterEnterAtNextBytecode, Dummy) \
ASM(InterpreterOnStackReplacement, ContextOnly) \
\
/* Baseline Compiler */ \
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
- ASM(BaselineOnStackReplacement, ContextOnly) \
+ ASM(BaselineOnStackReplacement, Void) \
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
ASM(BaselineEnterAtBytecode, Void) \
ASM(BaselineEnterAtNextBytecode, Void) \
@@ -200,8 +205,6 @@ namespace internal {
\
/* Debugger */ \
TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \
- ASM(FrameDropperTrampoline, FrameDropperTrampoline) \
- ASM(HandleDebuggerStatement, ContextOnly) \
\
/* Type conversions */ \
TFC(ToNumber, TypeConversion) \
@@ -770,6 +773,11 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
ASM(RegExpExperimentalTrampoline, CCall) \
\
+ /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \
+ CPP(ResizableArrayBufferPrototypeResize) \
+ CPP(GrowableSharedArrayBufferPrototypeGrow) \
+ CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \
+ \
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
@@ -863,6 +871,7 @@ namespace internal {
IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \
IF_WASM(ASM, WasmCompileLazy, Dummy) \
IF_WASM(ASM, WasmDebugBreak, Dummy) \
+ IF_WASM(ASM, WasmOnStackReplace, Dummy) \
IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \
IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \
@@ -983,6 +992,7 @@ namespace internal {
CPP(CollatorPrototypeCompare) \
/* ecma402 #sec-intl.collator.supportedlocalesof */ \
CPP(CollatorSupportedLocalesOf) \
+ /* ecma402 #sec-intl.collator.prototype.resolvedoptions */ \
CPP(CollatorPrototypeResolvedOptions) \
/* ecma402 #sup-date.prototype.tolocaledatestring */ \
CPP(DatePrototypeToLocaleDateString) \
@@ -1028,21 +1038,46 @@ namespace internal {
CPP(ListFormatSupportedLocalesOf) \
/* ecma402 #sec-intl-locale-constructor */ \
CPP(LocaleConstructor) \
+ /* ecma402 #sec-Intl.Locale.prototype.baseName */ \
CPP(LocalePrototypeBaseName) \
+ /* ecma402 #sec-Intl.Locale.prototype.calendar */ \
CPP(LocalePrototypeCalendar) \
+ /* ecma402 #sec-Intl.Locale.prototype.calendars */ \
+ CPP(LocalePrototypeCalendars) \
+ /* ecma402 #sec-Intl.Locale.prototype.caseFirst */ \
CPP(LocalePrototypeCaseFirst) \
+ /* ecma402 #sec-Intl.Locale.prototype.collation */ \
CPP(LocalePrototypeCollation) \
+ /* ecma402 #sec-Intl.Locale.prototype.collations */ \
+ CPP(LocalePrototypeCollations) \
+ /* ecma402 #sec-Intl.Locale.prototype.hourCycle */ \
CPP(LocalePrototypeHourCycle) \
+ /* ecma402 #sec-Intl.Locale.prototype.hourCycles */ \
+ CPP(LocalePrototypeHourCycles) \
+ /* ecma402 #sec-Intl.Locale.prototype.language */ \
CPP(LocalePrototypeLanguage) \
/* ecma402 #sec-Intl.Locale.prototype.maximize */ \
CPP(LocalePrototypeMaximize) \
/* ecma402 #sec-Intl.Locale.prototype.minimize */ \
CPP(LocalePrototypeMinimize) \
+ /* ecma402 #sec-Intl.Locale.prototype.numeric */ \
CPP(LocalePrototypeNumeric) \
+ /* ecma402 #sec-Intl.Locale.prototype.numberingSystem */ \
CPP(LocalePrototypeNumberingSystem) \
+ /* ecma402 #sec-Intl.Locale.prototype.numberingSystems */ \
+ CPP(LocalePrototypeNumberingSystems) \
+ /* ecma402 #sec-Intl.Locale.prototype.region */ \
CPP(LocalePrototypeRegion) \
+ /* ecma402 #sec-Intl.Locale.prototype.script */ \
CPP(LocalePrototypeScript) \
+ /* ecma402 #sec-Intl.Locale.prototype.textInfo */ \
+ CPP(LocalePrototypeTextInfo) \
+ /* ecma402 #sec-Intl.Locale.prototype.timezones */ \
+ CPP(LocalePrototypeTimeZones) \
+ /* ecma402 #sec-Intl.Locale.prototype.toString */ \
CPP(LocalePrototypeToString) \
+ /* ecma402 #sec-Intl.Locale.prototype.weekInfo */ \
+ CPP(LocalePrototypeWeekInfo) \
/* ecma402 #sec-intl.numberformat */ \
CPP(NumberFormatConstructor) \
/* ecma402 #sec-number-format-functions */ \
@@ -1057,6 +1092,7 @@ namespace internal {
CPP(NumberFormatSupportedLocalesOf) \
/* ecma402 #sec-intl.pluralrules */ \
CPP(PluralRulesConstructor) \
+ /* ecma402 #sec-intl.pluralrules.prototype.resolvedoptions */ \
CPP(PluralRulesPrototypeResolvedOptions) \
/* ecma402 #sec-intl.pluralrules.prototype.select */ \
CPP(PluralRulesPrototypeSelect) \
diff --git a/chromium/v8/src/builtins/builtins-error.cc b/chromium/v8/src/builtins/builtins-error.cc
index 840298eacbf..44dce9224a3 100644
--- a/chromium/v8/src/builtins/builtins-error.cc
+++ b/chromium/v8/src/builtins/builtins-error.cc
@@ -18,9 +18,12 @@ namespace internal {
// ES6 section 19.5.1.1 Error ( message )
BUILTIN(ErrorConstructor) {
HandleScope scope(isolate);
+ Handle<Object> options = FLAG_harmony_error_cause
+ ? args.atOrUndefined(isolate, 2)
+ : isolate->factory()->undefined_value();
RETURN_RESULT_OR_FAILURE(
isolate, ErrorUtils::Construct(isolate, args.target(), args.new_target(),
- args.atOrUndefined(isolate, 1)));
+ args.atOrUndefined(isolate, 1), options));
}
// static
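ErrorConstructor now forwards an optional second argument, the options bag carrying cause, but only when the harmony flag is on; atOrUndefined keeps the arity handling implicit by reading absent arguments as undefined. A sketch of that access pattern, with std::optional standing in for the undefined value:

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

using Arg = std::optional<std::string>;

// Out-of-range reads yield "undefined" instead of faulting, so
// `new Error(msg)` and `new Error(msg, { cause })` share one code path.
Arg AtOrUndefined(const std::vector<std::string>& args, std::size_t i) {
  return i < args.size() ? Arg(args[i]) : std::nullopt;
}

Arg ErrorOptions(const std::vector<std::string>& args, bool harmony_cause) {
  return harmony_cause ? AtOrUndefined(args, 2) : std::nullopt;
}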
diff --git a/chromium/v8/src/builtins/builtins-generator-gen.cc b/chromium/v8/src/builtins/builtins-generator-gen.cc
index 2e9d7e24e4f..b2d6e223e16 100644
--- a/chromium/v8/src/builtins/builtins-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-generator-gen.cc
@@ -205,7 +205,7 @@ TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
// TODO(cbruni): Merge with corresponding bytecode handler.
TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
- auto context = Parameter<Context>(Descriptor::kContext);
+ auto context = LoadContextFromBaseline();
StoreJSGeneratorObjectContext(generator, context);
auto suspend_id = SmiTag(UncheckedParameter<IntPtrT>(Descriptor::kSuspendId));
StoreJSGeneratorObjectContinuation(generator, suspend_id);
diff --git a/chromium/v8/src/builtins/builtins-handler-gen.cc b/chromium/v8/src/builtins/builtins-handler-gen.cc
index 3cbd626b8e4..19a31b81a7c 100644
--- a/chromium/v8/src/builtins/builtins-handler-gen.cc
+++ b/chromium/v8/src/builtins/builtins-handler-gen.cc
@@ -183,28 +183,39 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
// All elements kinds handled by EmitElementStore. Specifically, this includes
// fast elements and fixed typed array elements.
-#define ELEMENTS_KINDS(V) \
- V(PACKED_SMI_ELEMENTS) \
- V(HOLEY_SMI_ELEMENTS) \
- V(PACKED_ELEMENTS) \
- V(PACKED_NONEXTENSIBLE_ELEMENTS) \
- V(PACKED_SEALED_ELEMENTS) \
- V(HOLEY_ELEMENTS) \
- V(HOLEY_NONEXTENSIBLE_ELEMENTS) \
- V(HOLEY_SEALED_ELEMENTS) \
- V(PACKED_DOUBLE_ELEMENTS) \
- V(HOLEY_DOUBLE_ELEMENTS) \
- V(UINT8_ELEMENTS) \
- V(INT8_ELEMENTS) \
- V(UINT16_ELEMENTS) \
- V(INT16_ELEMENTS) \
- V(UINT32_ELEMENTS) \
- V(INT32_ELEMENTS) \
- V(FLOAT32_ELEMENTS) \
- V(FLOAT64_ELEMENTS) \
- V(UINT8_CLAMPED_ELEMENTS) \
- V(BIGUINT64_ELEMENTS) \
- V(BIGINT64_ELEMENTS)
+#define ELEMENTS_KINDS(V) \
+ V(PACKED_SMI_ELEMENTS) \
+ V(HOLEY_SMI_ELEMENTS) \
+ V(PACKED_ELEMENTS) \
+ V(PACKED_NONEXTENSIBLE_ELEMENTS) \
+ V(PACKED_SEALED_ELEMENTS) \
+ V(HOLEY_ELEMENTS) \
+ V(HOLEY_NONEXTENSIBLE_ELEMENTS) \
+ V(HOLEY_SEALED_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS) \
+ V(HOLEY_DOUBLE_ELEMENTS) \
+ V(UINT8_ELEMENTS) \
+ V(INT8_ELEMENTS) \
+ V(UINT16_ELEMENTS) \
+ V(INT16_ELEMENTS) \
+ V(UINT32_ELEMENTS) \
+ V(INT32_ELEMENTS) \
+ V(FLOAT32_ELEMENTS) \
+ V(FLOAT64_ELEMENTS) \
+ V(UINT8_CLAMPED_ELEMENTS) \
+ V(BIGUINT64_ELEMENTS) \
+ V(BIGINT64_ELEMENTS) \
+ V(RAB_GSAB_UINT8_ELEMENTS) \
+ V(RAB_GSAB_INT8_ELEMENTS) \
+ V(RAB_GSAB_UINT16_ELEMENTS) \
+ V(RAB_GSAB_INT16_ELEMENTS) \
+ V(RAB_GSAB_UINT32_ELEMENTS) \
+ V(RAB_GSAB_INT32_ELEMENTS) \
+ V(RAB_GSAB_FLOAT32_ELEMENTS) \
+ V(RAB_GSAB_FLOAT64_ELEMENTS) \
+ V(RAB_GSAB_UINT8_CLAMPED_ELEMENTS) \
+ V(RAB_GSAB_BIGUINT64_ELEMENTS) \
+ V(RAB_GSAB_BIGINT64_ELEMENTS)
void HandlerBuiltinsAssembler::DispatchByElementsKind(
TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function,
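Appending the RAB_GSAB_* kinds to ELEMENTS_KINDS routes stores into typed arrays backed by resizable or growable shared buffers through the same generated cases as the fixed-length kinds; a kind missing from the list would fall through to the unreachable default. A compact sketch of this list-driven dispatch, using three stand-in kinds:

#include <cstdio>
#include <cstdlib>

#define ELEMENTS_KINDS(V) V(UINT8) V(FLOAT64) V(RAB_GSAB_UINT8)

enum Kind {
#define E(K) k##K,
  ELEMENTS_KINDS(E)
#undef E
};

void Dispatch(Kind kind) {
  switch (kind) {
#define CASE(K)           \
  case k##K:              \
    std::printf(#K "\n"); \
    break;
    ELEMENTS_KINDS(CASE)
#undef CASE
    default:
      std::abort();  // the analogue of Unreachable() in the assembler
  }
}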
diff --git a/chromium/v8/src/builtins/builtins-ic-gen.cc b/chromium/v8/src/builtins/builtins-ic-gen.cc
index 81bf6379ece..e172b5a129b 100644
--- a/chromium/v8/src/builtins/builtins-ic-gen.cc
+++ b/chromium/v8/src/builtins/builtins-ic-gen.cc
@@ -10,70 +10,221 @@
namespace v8 {
namespace internal {
-#define IC_BUILTIN(Name) \
- void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
- AccessorAssembler assembler(state); \
- assembler.Generate##Name(); \
- }
+void Builtins::Generate_LoadIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC();
+}
+void Builtins::Generate_LoadIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_Megamorphic();
+}
+void Builtins::Generate_LoadIC_Noninlined(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_Noninlined();
+}
+void Builtins::Generate_LoadIC_NoFeedback(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_NoFeedback();
+}
+void Builtins::Generate_LoadICTrampoline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICTrampoline();
+}
+void Builtins::Generate_LoadICBaseline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICBaseline();
+}
+void Builtins::Generate_LoadICTrampoline_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICTrampoline_Megamorphic();
+}
+void Builtins::Generate_LoadSuperIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadSuperIC();
+}
+void Builtins::Generate_LoadSuperICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadSuperICBaseline();
+}
+void Builtins::Generate_KeyedLoadIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC();
+}
+void Builtins::Generate_KeyedLoadIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC_Megamorphic();
+}
+void Builtins::Generate_KeyedLoadIC_PolymorphicName(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC_PolymorphicName();
+}
+void Builtins::Generate_KeyedLoadICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICTrampoline();
+}
+void Builtins::Generate_KeyedLoadICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICBaseline();
+}
+void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICTrampoline_Megamorphic();
+}
+void Builtins::Generate_LoadGlobalIC_NoFeedback(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC_NoFeedback();
+}
+void Builtins::Generate_StoreGlobalIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalIC();
+}
+void Builtins::Generate_StoreGlobalICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalICTrampoline();
+}
+void Builtins::Generate_StoreGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalICBaseline();
+}
+void Builtins::Generate_StoreIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreIC();
+}
+void Builtins::Generate_StoreICTrampoline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreICTrampoline();
+}
+void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreICBaseline();
+}
+void Builtins::Generate_KeyedStoreIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreIC();
+}
+void Builtins::Generate_KeyedStoreICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreICTrampoline();
+}
+void Builtins::Generate_KeyedStoreICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreICBaseline();
+}
+void Builtins::Generate_StoreInArrayLiteralIC(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreInArrayLiteralIC();
+}
+void Builtins::Generate_StoreInArrayLiteralICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreInArrayLiteralICBaseline();
+}
+void Builtins::Generate_CloneObjectIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectIC();
+}
+void Builtins::Generate_CloneObjectICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectICBaseline();
+}
+void Builtins::Generate_CloneObjectIC_Slow(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectIC_Slow();
+}
+void Builtins::Generate_KeyedHasIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC();
+}
+void Builtins::Generate_KeyedHasICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasICBaseline();
+}
+void Builtins::Generate_KeyedHasIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC_Megamorphic();
+}
+void Builtins::Generate_KeyedHasIC_PolymorphicName(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC_PolymorphicName();
+}
+
+void Builtins::Generate_LoadGlobalIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC(TypeofMode::kNotInside);
+}
-#define IC_BUILTIN_PARAM(BuiltinName, GeneratorName, parameter) \
- void Builtins::Generate_##BuiltinName(compiler::CodeAssemblerState* state) { \
- AccessorAssembler assembler(state); \
- assembler.Generate##GeneratorName(parameter); \
- }
+void Builtins::Generate_LoadGlobalICInsideTypeof(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC(TypeofMode::kInside);
+}
-IC_BUILTIN(LoadIC)
-IC_BUILTIN(LoadIC_Megamorphic)
-IC_BUILTIN(LoadIC_Noninlined)
-IC_BUILTIN(LoadIC_NoFeedback)
-IC_BUILTIN(LoadICTrampoline)
-IC_BUILTIN(LoadICBaseline)
-IC_BUILTIN(LoadICTrampoline_Megamorphic)
-IC_BUILTIN(LoadSuperIC)
-IC_BUILTIN(LoadSuperICBaseline)
-IC_BUILTIN(KeyedLoadIC)
-IC_BUILTIN(KeyedLoadIC_Megamorphic)
-IC_BUILTIN(KeyedLoadIC_PolymorphicName)
-IC_BUILTIN(KeyedLoadICTrampoline)
-IC_BUILTIN(KeyedLoadICBaseline)
-IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic)
-IC_BUILTIN(LoadGlobalIC_NoFeedback)
-IC_BUILTIN(StoreGlobalIC)
-IC_BUILTIN(StoreGlobalICTrampoline)
-IC_BUILTIN(StoreGlobalICBaseline)
-IC_BUILTIN(StoreIC)
-IC_BUILTIN(StoreICTrampoline)
-IC_BUILTIN(StoreICBaseline)
-IC_BUILTIN(KeyedStoreIC)
-IC_BUILTIN(KeyedStoreICTrampoline)
-IC_BUILTIN(KeyedStoreICBaseline)
-IC_BUILTIN(StoreInArrayLiteralIC)
-IC_BUILTIN(StoreInArrayLiteralICBaseline)
-IC_BUILTIN(CloneObjectIC)
-IC_BUILTIN(CloneObjectICBaseline)
-IC_BUILTIN(CloneObjectIC_Slow)
-IC_BUILTIN(KeyedHasIC)
-IC_BUILTIN(KeyedHasICBaseline)
-IC_BUILTIN(KeyedHasIC_Megamorphic)
-IC_BUILTIN(KeyedHasIC_PolymorphicName)
+void Builtins::Generate_LoadGlobalICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kNotInside);
+}
-IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICBaseline, LoadGlobalICBaseline, NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofBaseline, LoadGlobalICBaseline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupGlobalICBaseline, LookupGlobalICBaseline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupGlobalICInsideTypeofBaseline, LookupGlobalICBaseline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupContextBaseline, LookupContextBaseline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupContextInsideTypeofBaseline, LookupContextBaseline,
- INSIDE_TYPEOF)
+void Builtins::Generate_LoadGlobalICInsideTypeofTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LoadGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LoadGlobalICInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICBaseline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LookupGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalICBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LookupGlobalICInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalICBaseline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LookupContextBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupContextBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LookupContextInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupContextBaseline(TypeofMode::kInside);
+}
TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
auto map = Parameter<Map>(Descriptor::kMap);
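Expanding the IC_BUILTIN macros into explicit functions trades a little repetition for code that tools can index, grep, and step through; and where the old IC_BUILTIN_PARAM stamped out typeof variants from bare macro arguments, the new code passes a scoped TypeofMode. A short sketch of that parameterization:

#include <iostream>

enum class TypeofMode { kNotInside, kInside };

// One generator parameterized by a scoped enum replaces the two
// variants the IC_BUILTIN_PARAM macro used to stamp out.
void GenerateLoadGlobalIC(TypeofMode mode) {
  std::cout << (mode == TypeofMode::kInside ? "inside typeof\n"
                                            : "not inside typeof\n");
}

void Generate_LoadGlobalIC() { GenerateLoadGlobalIC(TypeofMode::kNotInside); }
void Generate_LoadGlobalICInsideTypeof() {
  GenerateLoadGlobalIC(TypeofMode::kInside);
}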
diff --git a/chromium/v8/src/builtins/builtins-internal-gen.cc b/chromium/v8/src/builtins/builtins-internal-gen.cc
index 0c4131dba96..274709b46a0 100644
--- a/chromium/v8/src/builtins/builtins-internal-gen.cc
+++ b/chromium/v8/src/builtins/builtins-internal-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/execution/frame-constants.h"
#include "src/heap/memory-chunk.h"
@@ -172,11 +173,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
TNode<BoolT> ShouldSkipFPRegs(TNode<Smi> mode) {
- return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs));
+ return TaggedEqual(mode, SmiConstant(SaveFPRegsMode::kIgnore));
}
TNode<BoolT> ShouldEmitRememberSet(TNode<Smi> remembered_set) {
- return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
+ return TaggedEqual(remembered_set, SmiConstant(RememberedSetAction::kEmit));
}
template <typename Ret, typename Arg0, typename Arg1>
@@ -188,7 +189,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&dont_save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kDontSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kIgnore,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1));
Goto(next);
@@ -197,7 +198,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kSave,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1));
Goto(next);
@@ -213,7 +214,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&dont_save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kDontSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kIgnore,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1),
std::make_pair(MachineTypeOf<Arg2>::value, arg2));
@@ -223,7 +224,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kSave,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1),
std::make_pair(MachineTypeOf<Arg2>::value, arg2));
@@ -821,8 +822,9 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
const bool builtin_exit_frame = true;
- TNode<Code> code = HeapConstant(CodeFactory::CEntry(
- isolate(), 1, kDontSaveFPRegs, kArgvOnStack, builtin_exit_frame));
+ TNode<Code> code =
+ HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame));
// Unconditionally push argc, target and new target as extra stack arguments.
// They will be used by stack frame iterators when constructing stack trace.
@@ -891,54 +893,54 @@ TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvInRegister, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvInRegister, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
@@ -956,7 +958,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
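The mechanical substitutions in this file replace bare enum constants (kDontSaveFPRegs, kArgvOnStack) with scoped enum values. A minimal sketch of why that migration pays off at CEntry's many call sites:

enum class SaveFPRegsMode { kIgnore, kSave };
enum class ArgvMode { kStack, kRegister };

// Scoped enums keep the constants out of the global namespace and stop
// implicit conversion to int, so each call site reads unambiguously.
void GenerateCEntry(int result_size, SaveFPRegsMode fp_mode,
                    ArgvMode argv_mode, bool builtin_exit_frame) {
  (void)result_size; (void)fp_mode; (void)argv_mode; (void)builtin_exit_frame;
}

int main() {
  // Mirrors Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
  GenerateCEntry(1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}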
diff --git a/chromium/v8/src/builtins/builtins-intl.cc b/chromium/v8/src/builtins/builtins-intl.cc
index fe32a484a3e..6febc81c3a5 100644
--- a/chromium/v8/src/builtins/builtins-intl.cc
+++ b/chromium/v8/src/builtins/builtins-intl.cc
@@ -668,6 +668,49 @@ BUILTIN(LocalePrototypeMinimize) {
RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Minimize(isolate, locale));
}
+BUILTIN(LocalePrototypeCalendars) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.calendars");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Calendars(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeCollations) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.collations");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Collations(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeHourCycles) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.hourCycles");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::HourCycles(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeNumberingSystems) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.numberingSystems");
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSLocale::NumberingSystems(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeTextInfo) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.textInfo");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TextInfo(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeTimeZones) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.timeZones");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TimeZones(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeWeekInfo) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.weekInfo");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::WeekInfo(isolate, locale));
+}
+
BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
HandleScope scope(isolate);
Handle<Object> locales = args.atOrUndefined(isolate, 1);
diff --git a/chromium/v8/src/builtins/builtins-lazy-gen.cc b/chromium/v8/src/builtins/builtins-lazy-gen.cc
index 8af0bef95d2..4749ee094bc 100644
--- a/chromium/v8/src/builtins/builtins-lazy-gen.cc
+++ b/chromium/v8/src/builtins/builtins-lazy-gen.cc
@@ -154,20 +154,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
&baseline);
- // Finally, check for presence of an NCI cached Code object - if an entry
- // possibly exists, call into runtime to query the cache.
- TNode<Uint8T> flags2 =
- LoadObjectField<Uint8T>(shared, SharedFunctionInfo::kFlags2Offset);
- TNode<BoolT> may_have_cached_code =
- IsSetWord32<SharedFunctionInfo::MayHaveCachedCodeBit>(flags2);
- code = Select<Code>(
- may_have_cached_code,
- [=]() {
- return CAST(CallRuntime(Runtime::kTryInstallNCICode,
- Parameter<Context>(Descriptor::kContext),
- function));
- },
- [=]() { return sfi_code; });
+ code = sfi_code;
Goto(&tailcall_code);
BIND(&baseline);
diff --git a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
index 9f16186d13b..281e9234dc7 100644
--- a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -46,8 +46,11 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> native_context);
void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
+ void RunAllPromiseHooks(PromiseHookType type, TNode<Context> context,
+ TNode<HeapObject> promise_or_capability);
void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability);
+ TNode<HeapObject> promise_or_capability,
+ TNode<Uint32T> promiseHookFlags);
};
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
@@ -199,7 +202,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
CAST(promise_to_resolve));
{
@@ -208,7 +211,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
promise_to_resolve, thenable, then);
}
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
CAST(promise_to_resolve));
RewindEnteredContext(saved_entered_context_count);
@@ -243,8 +246,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -253,8 +256,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -296,8 +299,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -306,8 +309,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -465,12 +468,42 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
saved_entered_context_count);
}
+void MicrotaskQueueBuiltinsAssembler::RunAllPromiseHooks(
+ PromiseHookType type, TNode<Context> context,
+ TNode<HeapObject> promise_or_capability) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
+ Branch(NeedsAnyPromiseHooks(promiseHookFlags), &hook, &done_hook);
+ BIND(&hook);
+ {
+ switch (type) {
+ case PromiseHookType::kBefore:
+ RunContextPromiseHookBefore(context, promise_or_capability,
+ promiseHookFlags);
+ RunPromiseHook(Runtime::kPromiseHookBefore, context,
+ promise_or_capability, promiseHookFlags);
+ break;
+ case PromiseHookType::kAfter:
+ RunContextPromiseHookAfter(context, promise_or_capability,
+ promiseHookFlags);
+ RunPromiseHook(Runtime::kPromiseHookAfter, context,
+ promise_or_capability, promiseHookFlags);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability) {
+ TNode<HeapObject> promise_or_capability,
+ TNode<Uint32T> promiseHookFlags) {
Label hook(this, Label::kDeferred), done_hook(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
- &done_hook);
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags), &hook, &done_hook);
BIND(&hook);
{
// Get to the underlying JSPromise instance.
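RunAllPromiseHooks folds the context-level and isolate-level hook checks behind a single combined test, so the common no-hooks case costs one branch; the per-family checks only run on the deferred path. A plain-C++ sketch of that gating, with illustrative flag bits:

#include <cstdint>
#include <cstdio>

// Illustrative bits; the real assignments live in V8's promise-hook
// flag word and are not reproduced here.
constexpr uint32_t kContextHook = 1u << 0;
constexpr uint32_t kIsolateHookOrDebug = 1u << 1;

void RunAllPromiseHooks(uint32_t flags, bool before) {
  // One combined test guards the deferred slow path...
  if ((flags & (kContextHook | kIsolateHookOrDebug)) == 0) return;
  // ...and each hook family is re-tested only once we know some hook runs.
  if (flags & kContextHook)
    std::printf("context %s hook\n", before ? "before" : "after");
  if (flags & kIsolateHookOrDebug)
    std::printf("isolate %s hook\n", before ? "before" : "after");
}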
diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.cc b/chromium/v8/src/builtins/builtins-regexp-gen.cc
index 23648efb98b..e59d2a00ac3 100644
--- a/chromium/v8/src/builtins/builtins-regexp-gen.cc
+++ b/chromium/v8/src/builtins/builtins-regexp-gen.cc
@@ -1014,6 +1014,12 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TNode<Object> regexp,
bool is_fastpath) {
+ TVARIABLE(String, result);
+ Label runtime(this, Label::kDeferred), done(this, &result);
+ if (is_fastpath) {
+ GotoIfForceSlowPath(&runtime);
+ }
+
Isolate* isolate = this->isolate();
const TNode<IntPtrT> int_one = IntPtrConstant(1);
@@ -1110,7 +1116,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
// corresponding char for each set flag.
{
- const TNode<String> result = AllocateSeqOneByteString(var_length.value());
+ const TNode<String> string = AllocateSeqOneByteString(var_length.value());
TVARIABLE(IntPtrT, var_offset,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1120,7 +1126,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
Label next(this); \
GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
const TNode<Int32T> value = Int32Constant(CHAR); \
- StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
var_offset.value(), value); \
var_offset = IntPtrAdd(var_offset.value(), int_one); \
Goto(&next); \
@@ -1137,7 +1143,26 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
#undef CASE_FOR_FLAG
- return result;
+ if (is_fastpath) {
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
+ result = string;
+ Goto(&done);
+
+ BIND(&runtime);
+ {
+ result =
+ CAST(CallRuntime(Runtime::kRegExpStringFromFlags, context, regexp));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+#else
+ return string;
+#endif
+ } else {
+ return string;
+ }
}
}
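The FlagsGetter change adds a runtime fallback to the fast path, but only in builds that define V8_ENABLE_FORCE_SLOW_PATH; production binaries keep returning the CSA-built string directly. A sketch of this compile-time-guarded slow path; the function bodies are placeholders:

int FastFlagsString() { return 1; }     // stands in for the CSA fast path
int RuntimeFlagsString() { return 1; }  // must produce the same result

int FlagsGetter(bool force_slow) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
  if (force_slow) return RuntimeFlagsString();  // testing-only fallback
#else
  (void)force_slow;  // compiled out entirely in production builds
#endif
  return FastFlagsString();
}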
diff --git a/chromium/v8/src/builtins/builtins-trace.cc b/chromium/v8/src/builtins/builtins-trace.cc
index cf85ce9948b..24baf59522b 100644
--- a/chromium/v8/src/builtins/builtins-trace.cc
+++ b/chromium/v8/src/builtins/builtins-trace.cc
@@ -9,6 +9,7 @@
#include "src/json/json-stringifier.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
+#include "src/tracing/traced-value.h"
#if defined(V8_USE_PERFETTO)
#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.cc b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
index 65b1ab2f2b9..d333a61e395 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
@@ -123,13 +123,26 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
+ TNode<JSTypedArray> receiver_array = CAST(receiver);
TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> byte_length = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSArrayBufferViewByteLength(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(byte_length));
+ LoadJSArrayBufferViewBuffer(receiver_array);
+
+ Label variable_length(this), normal(this);
+ Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal);
+ BIND(&variable_length);
+ {
+ Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayByteLength(
+ context, receiver_array, receiver_buffer)));
+ }
+
+ BIND(&normal);
+ {
+ // Default to zero if the {receiver}s buffer was detached.
+ TNode<UintPtrT> byte_length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSArrayBufferViewByteLength(receiver_array); });
+ Return(ChangeUintPtrToTagged(byte_length));
+ }
}
// ES6 #sec-get-%typedarray%.prototype.byteoffset
@@ -159,13 +172,29 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
+ TNode<JSTypedArray> receiver_array = CAST(receiver);
TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> length = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSTypedArrayLength(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(length));
+ LoadJSArrayBufferViewBuffer(receiver_array);
+
+ Label variable_length(this), normal(this);
+ Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal);
+ BIND(&variable_length);
+ {
+ Label miss(this);
+ Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayLength(
+ receiver_array, receiver_buffer, &miss)));
+ BIND(&miss);
+ Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
+ }
+
+ BIND(&normal);
+ {
+ // Default to zero if the {receiver}s buffer was detached.
+ TNode<UintPtrT> length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSTypedArrayLength(receiver_array); });
+ Return(ChangeUintPtrToTagged(length));
+ }
}
TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
@@ -322,17 +351,18 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
int32_t elements_kinds[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
Label* elements_kind_labels[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
@@ -350,6 +380,15 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, sizeof(ctype), 0); \
+ Goto(&next); \
+ }
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
BIND(&if_unknown_type);
Unreachable();
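The dispatch above leans on X-macros: each kind list (TYPED_ARRAYS, and now RAB_GSAB_TYPED_ARRAYS) is expanded three times, into the kinds array, the label array, and the bound handler bodies, so all three stay in sync by construction and the STATIC_ASSERT guards the array sizes. A self-contained sketch of the same pattern with a hypothetical kind list:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical element-kind list; the real code has one entry per
    // TypedArray kind plus the RAB/GSAB variants.
    #define KINDS(V) V(Uint8, uint8_t) V(Int32, int32_t) V(Float64, double)

    enum Kind {
    #define KIND_CASE(Name, ctype) k##Name,
      KINDS(KIND_CASE)
    #undef KIND_CASE
      kCount
    };

    // One expansion emits every case body; adding a kind to KINDS updates
    // the enum and the dispatch in lock step.
    void Dispatch(Kind kind) {
      switch (kind) {
    #define KIND_CASE(Name, ctype)                           \
        case k##Name:                                        \
          std::printf(#Name ": %zu bytes\n", sizeof(ctype)); \
          break;
        KINDS(KIND_CASE)
    #undef KIND_CASE
        default:
          std::printf("unknown kind\n");  // mirrors Unreachable()
      }
    }

    int main() { Dispatch(kInt32); }  // prints "Int32: 4 bytes"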
@@ -374,7 +413,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base)));
// Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
DCHECK_EQ(
- isolate()->isolate_root(),
+ isolate()->cage_base(),
JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
// See JSTypedArray::SetOnHeapDataPtr() for details.
offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base));
diff --git a/chromium/v8/src/builtins/builtins-typed-array.cc b/chromium/v8/src/builtins/builtins-typed-array.cc
index fdadc7a554c..bb936e6e463 100644
--- a/chromium/v8/src/builtins/builtins-typed-array.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array.cc
@@ -154,7 +154,8 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return ElementsAccessor::ForKind(kind)->Fill(array, obj_value, start, end);
+ RETURN_RESULT_OR_FAILURE(isolate, ElementsAccessor::ForKind(kind)->Fill(
+ array, obj_value, start, end));
}
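Fill can now fail partway (for instance when a resizable buffer changes underneath it), so the elements accessor returns a Maybe-style result and the builtin converts an empty result into the exception sentinel instead of returning a handle directly. A rough sketch of that convention, with std::optional standing in for V8's MaybeHandle:

    #include <cstdio>
    #include <optional>

    // An empty optional plays the role of "exception pending".
    std::optional<int> Fill(bool ok) {
      if (!ok) return std::nullopt;  // failure: no result to return
      return 42;
    }

    // Mirrors RETURN_RESULT_OR_FAILURE: unwrap on success, otherwise hand a
    // distinguished failure value back to the dispatcher.
    int Builtin_Fill(bool ok) {
      constexpr int kFailureSentinel = -1;
      if (std::optional<int> result = Fill(ok)) return *result;
      return kFailureSentinel;
    }

    int main() {
      std::printf("%d %d\n", Builtin_Fill(true), Builtin_Fill(false));  // 42 -1
    }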
BUILTIN(TypedArrayPrototypeIncludes) {
diff --git a/chromium/v8/src/builtins/builtins-utils.h b/chromium/v8/src/builtins/builtins-utils.h
index e5f420a20de..b9146ab6253 100644
--- a/chromium/v8/src/builtins/builtins-utils.h
+++ b/chromium/v8/src/builtins/builtins-utils.h
@@ -85,8 +85,7 @@ class BuiltinArguments : public JavaScriptArguments {
V8_NOINLINE static Address Builtin_Impl_Stats_##name( \
int args_length, Address* args_object, Isolate* isolate) { \
BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, \
- RuntimeCallCounterId::kBuiltin_##name); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBuiltin_##name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Builtin_" #name); \
return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
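RCS_SCOPE is a shorter spelling of the scoped runtime-call timer it replaces; the mechanism is plain RAII, so every exit path from the builtin body is still measured. A minimal sketch with a hypothetical timer (not V8's actual RuntimeCallTimerScope):

    #include <chrono>
    #include <cstdio>

    // Starts timing on construction and reports on destruction, whichever
    // way the enclosing scope is left.
    class ScopedTimer {
     public:
      explicit ScopedTimer(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
      ~ScopedTimer() {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
      }
     private:
      const char* name_;
      std::chrono::steady_clock::time_point start_;
    };

    #define RCS_SCOPE_SKETCH(name) ScopedTimer rcs_timer(name)

    int main() {
      RCS_SCOPE_SKETCH("Builtin_Demo");
      return 0;  // timer reports as the scope unwinds
    }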
diff --git a/chromium/v8/src/builtins/builtins-wasm-gen.cc b/chromium/v8/src/builtins/builtins-wasm-gen.cc
index 0704d8681ba..eb9311d0c62 100644
--- a/chromium/v8/src/builtins/builtins-wasm-gen.cc
+++ b/chromium/v8/src/builtins/builtins-wasm-gen.cc
@@ -9,7 +9,6 @@
#include "src/codegen/interface-descriptors.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/builtins/cast.tq b/chromium/v8/src/builtins/cast.tq
index b490055a19e..d7d2eb6aa6d 100644
--- a/chromium/v8/src/builtins/cast.tq
+++ b/chromium/v8/src/builtins/cast.tq
@@ -329,6 +329,24 @@ Cast<Number|TheHole>(o: Object): Number|TheHole labels CastError {
}
}
+Cast<Context|Zero|Undefined>(o: Object): Context|Zero|Undefined
+ labels CastError {
+ typeswitch (o) {
+ case (o: Context): {
+ return o;
+ }
+ case (o: Zero): {
+ return o;
+ }
+ case (o: Undefined): {
+ return o;
+ }
+ case (Object): {
+ goto CastError;
+ }
+ }
+}
+
macro Cast<A : type extends HeapObject>(o: HeapObject): A
labels CastError;
@@ -386,6 +404,12 @@ Cast<Undefined|Callable>(o: HeapObject): Undefined|Callable
return HeapObjectToCallable(o) otherwise CastError;
}
+Cast<Undefined|JSFunction>(o: HeapObject): Undefined|JSFunction
+ labels CastError {
+ if (o == Undefined) return Undefined;
+ return Cast<JSFunction>(o) otherwise CastError;
+}
+
macro Cast<T : type extends Symbol>(o: Symbol): T labels CastError;
Cast<PublicSymbol>(s: Symbol): PublicSymbol labels CastError {
if (s.flags.is_private) goto CastError;
diff --git a/chromium/v8/src/builtins/constructor.tq b/chromium/v8/src/builtins/constructor.tq
index add6db03052..d929c7f485f 100644
--- a/chromium/v8/src/builtins/constructor.tq
+++ b/chromium/v8/src/builtins/constructor.tq
@@ -15,6 +15,8 @@ extern runtime CreateObjectLiteral(
namespace constructor {
+extern builtin FastNewClosure(
+ Context, SharedFunctionInfo, FeedbackCell): JSFunction;
extern builtin FastNewObject(Context, JSFunction, JSReceiver): JSObject;
extern enum AllocationSiteMode {
@@ -42,6 +44,15 @@ extern macro ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
extern macro ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(Context):
JSObject;
+extern macro LoadContextFromBaseline(): Context;
+
+builtin FastNewClosureBaseline(
+ sharedFunctionInfo: SharedFunctionInfo,
+ feedbackCell: FeedbackCell): JSFunction {
+ const context = LoadContextFromBaseline();
+ tail FastNewClosure(context, sharedFunctionInfo, feedbackCell);
+}
+
builtin FastNewFunctionContextEval(implicit context: Context)(
scopeInfo: ScopeInfo, slots: uint32): Context {
return FastNewFunctionContext(scopeInfo, slots, context, kEvalScope);
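FastNewClosureBaseline follows the pattern the other *Baseline builtins in this change use: drop the context parameter, recover the context from the baseline frame, then tail-call the ordinary builtin. The shape, reduced to plain C++ with hypothetical stand-ins:

    #include <cstdio>

    struct Context { int id; };

    // Hypothetical: the baseline frame always knows the current context.
    static Context g_frame_context{7};
    Context* LoadContextFromFrame() { return &g_frame_context; }

    int FastNewClosure(Context* cx, int shared_info, int feedback_cell) {
      return cx->id + shared_info + feedback_cell;  // placeholder body
    }

    int FastNewClosureBaseline(int shared_info, int feedback_cell) {
      Context* cx = LoadContextFromFrame();  // recovered, not passed in
      // The generated builtin tail-calls; a plain return models that here.
      return FastNewClosure(cx, shared_info, feedback_cell);
    }

    int main() { std::printf("%d\n", FastNewClosureBaseline(1, 2)); }  // 10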
diff --git a/chromium/v8/src/builtins/conversion.tq b/chromium/v8/src/builtins/conversion.tq
index 5a2dccd068c..636f49a024d 100644
--- a/chromium/v8/src/builtins/conversion.tq
+++ b/chromium/v8/src/builtins/conversion.tq
@@ -45,11 +45,30 @@ builtin NumberToString(implicit context: Context)(input: Number): String {
}
// ES6 section 7.1.2 ToBoolean ( argument )
-builtin ToBoolean(implicit context: Context)(input: JSAny): Boolean {
+builtin ToBoolean(input: JSAny): Boolean {
BranchIfToBooleanIsTrue(input) otherwise return TrueConstant(),
return FalseConstant();
}
+struct ToBooleanForBaselineJumpResult {
+ value: JSAny;
+ is_to_boolean: Smi;
+}
+// ToBoolean for baseline code jumps, which
+// a) returns the original value as the first return value, to avoid needing
+// to save it in the caller, and
+// b) returns the true/false value as a Smi, to make the baseline-side
+// comparison cheaper.
+builtin ToBooleanForBaselineJump(input: JSAny): ToBooleanForBaselineJumpResult {
+ try {
+ BranchIfToBooleanIsTrue(input) otherwise IsTrue, IsFalse;
+ } label IsTrue {
+ return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 1};
+ } label IsFalse {
+ return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 0};
+ }
+}
+
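Returning the pair (original value, boolean as a small integer) lets the baseline caller keep the value live without re-saving it and branch on a cheap integer compare rather than on a tagged true/false. The calling convention, sketched in plain C++ with a struct return standing in for the two-register result:

    #include <cstdio>
    #include <string>

    struct ToBooleanResult {
      const std::string* value;  // the untouched input, passed back through
      int is_true;               // 1 or 0; cheap for the caller to test
    };

    ToBooleanResult ToBooleanForJump(const std::string& input) {
      return {&input, input.empty() ? 0 : 1};  // "" is falsy in this sketch
    }

    int main() {
      std::string s = "hello";
      ToBooleanResult r = ToBooleanForJump(s);
      if (r.is_true) std::printf("truthy: %s\n", r.value->c_str());
    }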
transitioning builtin ToLength(implicit context: Context)(input: JSAny):
Number {
// We might need to loop once for ToNumber conversion.
diff --git a/chromium/v8/src/builtins/ia32/builtins-ia32.cc b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
index 44b71bed915..4993de4816f 100644
--- a/chromium/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
@@ -8,6 +8,7 @@
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -116,7 +117,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// edx: new target
// Reload context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
// Restore context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
@@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore and call the constructor function.
__ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
// ----------- S t a t e -------------
// -- eax: constructor result
@@ -597,7 +598,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -645,15 +646,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(edi, ecx);
-
__ bind(&loop);
- __ dec(edi);
+ __ dec(ecx);
__ j(less, &done_loop);
__ Push(
- FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
+ FieldOperand(ebx, ecx, times_tagged_size, FixedArray::kHeaderSize));
__ jmp(&loop);
-
__ bind(&done_loop);
}
@@ -740,7 +738,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
+ SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1458,7 +1457,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// and edi are used as scratch registers.
Generate_InterpreterPushZeroAndArgsAndReturnAddress(
masm, eax, ecx, edx, edi,
- InterpreterPushArgsThenConstructDescriptor::kStackArgumentsCount,
+ InterpreterPushArgsThenConstructDescriptor::GetStackParameterCount(),
&stack_overflow);
// Call the appropriate constructor. eax and ecx already contain intended
@@ -1591,7 +1590,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ jmp(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1636,7 +1635,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
// static
@@ -1666,7 +1665,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
}
@@ -1939,7 +1938,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ Set(eax, 0);
+ __ Move(eax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -2108,6 +2107,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2133,7 +2133,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ pop(kArgumentsList);
__ PushReturnAddressFrom(edx);
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if
// kArgumentsLength == 0.
Label ok, fail;
@@ -2294,7 +2294,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AllocateStackSpace(scratch);
// Include return address and receiver.
__ add(eax, Immediate(2));
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
// Loop.
__ bind(&copy);
@@ -2443,7 +2443,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzx_w(
ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(edi, no_reg, ecx, eax, JUMP_FUNCTION);
+ __ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
@@ -2788,6 +2788,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ mov(kContextRegister,
+ MemOperand(ebp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2896,6 +2898,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2908,7 +2915,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// ecx: pointer to the first argument
STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
@@ -2928,8 +2935,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
int arg_stack_space = 3;
// Enter the exit frame that transitions from JavaScript to C++.
- if (argv_mode == kArgvInRegister) {
- DCHECK(save_doubles == kDontSaveFPRegs);
+ if (argv_mode == ArgvMode::kRegister) {
+ DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
DCHECK(!builtin_exit_frame);
__ EnterApiExitFrame(arg_stack_space, edi);
@@ -2938,7 +2945,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(edi, eax);
} else {
__ EnterExitFrame(
- arg_stack_space, save_doubles == kSaveFPRegs,
+ arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
@@ -2985,7 +2992,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
+ argv_mode == ArgvMode::kStack);
__ ret(0);
// Handling of exception.
@@ -3148,7 +3156,7 @@ Operand ApiParameterOperand(int index) {
// stores the pointer to the reserved slot into esi.
void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
__ EnterApiExitFrame(argc, scratch);
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -3961,9 +3969,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movsd(Operand(esi, dst_offset), xmm0);
}
+ if (FLAG_debug_code) {
+ const int kTopMask = 0x3800;
+ __ push(eax);
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(eax, Immediate(kTopMask));
+ __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer);
+ __ pop(eax);
+ }
// Clear all FPU exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
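The added assertion replaces the old TODO: debug builds now verify directly that the x87 TOP field is zero before clearing exceptions. TOP lives in bits 11-13 of the x87 status word, which is exactly what kTopMask selects:

    #include <cassert>
    #include <cstdio>

    int main() {
      // x87 status word: TOP (register-stack top pointer) is bits 11..13.
      const unsigned kTopMask = 0x7u << 11;
      assert(kTopMask == 0x3800u);
      std::printf("kTopMask = 0x%X\n", kTopMask);  // 0x3800
    }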
diff --git a/chromium/v8/src/builtins/ic-callable.tq b/chromium/v8/src/builtins/ic-callable.tq
index 85525c4c683..dd29e8bf5e2 100644
--- a/chromium/v8/src/builtins/ic-callable.tq
+++ b/chromium/v8/src/builtins/ic-callable.tq
@@ -6,6 +6,10 @@ namespace ic {
namespace callable {
extern macro IncrementCallCount(FeedbackVector, uintptr): void;
+const kCallFeedbackContentFieldMask: constexpr int32
+ generates 'FeedbackNexus::CallFeedbackContentField::kMask';
+const kCallFeedbackContentFieldShift: constexpr uint32
+ generates 'FeedbackNexus::CallFeedbackContentField::kShift';
macro IsMonomorphic(feedback: MaybeObject, target: JSAny): bool {
return IsWeakReferenceToObject(feedback, target);
@@ -50,8 +54,42 @@ macro TransitionToMegamorphic(implicit context: Context)(
ReportFeedbackUpdate(feedbackVector, slotId, 'Call:TransitionMegamorphic');
}
+macro TaggedEqualPrototypeApplyFunction(implicit context: Context)(
+ target: JSAny): bool {
+ return TaggedEqual(target, GetPrototypeApplyFunction());
+}
+
+macro FeedbackValueIsReceiver(implicit context: Context)(
+ feedbackVector: FeedbackVector, slotId: uintptr): bool {
+ const callCount: intptr = SmiUntag(Cast<Smi>(LoadFeedbackVectorSlot(
+ feedbackVector, slotId, kTaggedSize)) otherwise return false);
+ return (callCount & IntPtrConstant(kCallFeedbackContentFieldMask)) !=
+ IntPtrConstant(0);
+}
+
+macro SetCallFeedbackContent(implicit context: Context)(
+ feedbackVector: FeedbackVector, slotId: uintptr,
+ callFeedbackContent: constexpr CallFeedbackContent): void {
+  // Load the call count field from the feedback vector.
+ const callCount: intptr = SmiUntag(Cast<Smi>(LoadFeedbackVectorSlot(
+ feedbackVector, slotId, kTaggedSize)) otherwise return );
+  // The second-lowest bit of the call count states whether the feedback
+  // collected is a target or a receiver. Change that bit based on the
+  // callFeedbackContent input.
+ const callFeedbackContentFieldMask: intptr =
+ ~IntPtrConstant(kCallFeedbackContentFieldMask);
+ const newCount: intptr = (callCount & callFeedbackContentFieldMask) |
+ Convert<intptr>(Signed(
+ %RawConstexprCast<constexpr uint32>(callFeedbackContent)
+ << kCallFeedbackContentFieldShift));
+ StoreFeedbackVectorSlot(
+ feedbackVector, slotId, SmiTag(newCount), SKIP_WRITE_BARRIER,
+ kTaggedSize);
+ ReportFeedbackUpdate(feedbackVector, slotId, 'Call:SetCallFeedbackContent');
+}
+
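The call-count slot is a Smi that multiplexes the invocation counter with a small CallFeedbackContent field; SetCallFeedbackContent clears just that field and ORs in the new tag, leaving the count bits alone. The same read-modify-write in plain C++ (the one-bit layout here is hypothetical, mirroring FeedbackNexus::CallFeedbackContentField):

    #include <cassert>
    #include <cstdint>

    enum class CallFeedbackContent : uint32_t { kTarget = 0, kReceiver = 1 };

    // Hypothetical layout: bit 1 tags target vs. receiver feedback; the
    // remaining bits hold the call count.
    constexpr uint32_t kContentShift = 1;
    constexpr uint32_t kContentMask = 1u << kContentShift;

    uint32_t SetCallFeedbackContent(uint32_t call_count,
                                    CallFeedbackContent content) {
      // Clear the field, then OR in the new tag; count bits are untouched.
      return (call_count & ~kContentMask) |
             (static_cast<uint32_t>(content) << kContentShift);
    }

    bool FeedbackValueIsReceiver(uint32_t call_count) {
      return (call_count & kContentMask) != 0;
    }

    int main() {
      uint32_t cc =
          SetCallFeedbackContent(0b1100u, CallFeedbackContent::kReceiver);
      assert(FeedbackValueIsReceiver(cc));
      cc = SetCallFeedbackContent(cc, CallFeedbackContent::kTarget);
      assert(!FeedbackValueIsReceiver(cc));
    }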
macro CollectCallFeedback(
- maybeTarget: JSAny, context: Context,
+ maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
// TODO(v8:9891): Remove this assert once all callers are ported to Torque.
// This assert ensures correctness of maybeFeedbackVector's type which can
@@ -72,7 +110,24 @@ macro CollectCallFeedback(
// If cleared, we have a new chance to become monomorphic.
const feedbackValue: HeapObject =
- MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic;
+ MaybeObjectToStrong(feedback) otherwise TryReinitializeAsMonomorphic;
+
+ if (FeedbackValueIsReceiver(feedbackVector, slotId) &&
+ TaggedEqualPrototypeApplyFunction(maybeTarget)) {
+ // If the Receiver is recorded and the target is
+ // Function.prototype.apply, check whether we can stay monomorphic based
+ // on the receiver.
+ if (IsMonomorphic(feedback, RunLazy(maybeReceiver))) {
+ return;
+ } else {
+ // If not, reinitialize the feedback with target.
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kTarget);
+ TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ otherwise TransitionToMegamorphic;
+ return;
+ }
+ }
// Try transitioning to a feedback cell.
// Check if {target}s feedback cell matches the {feedbackValue}.
@@ -92,8 +147,20 @@ macro CollectCallFeedback(
StoreWeakReferenceInFeedbackVector(feedbackVector, slotId, feedbackCell);
ReportFeedbackUpdate(feedbackVector, slotId, 'Call:FeedbackVectorCell');
+ } label TryReinitializeAsMonomorphic {
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kTarget);
+ goto TryInitializeAsMonomorphic;
} label TryInitializeAsMonomorphic {
- TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ let recordedFunction = maybeTarget;
+ if (TaggedEqualPrototypeApplyFunction(maybeTarget)) {
+ recordedFunction = RunLazy(maybeReceiver);
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kReceiver);
+ } else {
+ assert(!FeedbackValueIsReceiver(feedbackVector, slotId));
+ }
+ TryInitializeAsMonomorphic(recordedFunction, feedbackVector, slotId)
otherwise TransitionToMegamorphic;
} label TransitionToMegamorphic {
TransitionToMegamorphic(feedbackVector, slotId);
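The net effect of the new parameter: when the call target is Function.prototype.apply, the identity worth caching is the receiver (the function actually being applied), so the IC records that and tags the slot as receiver feedback; otherwise it records the target as before. The decision, reduced to plain C++ with hypothetical types:

    #include <cstdio>
    #include <functional>

    struct Function { const char* name; };

    // Hypothetical one-entry feedback slot.
    struct FeedbackSlot {
      const Function* recorded = nullptr;
      bool is_receiver = false;
    };

    const Function kApply{"Function.prototype.apply"};

    void CollectCallFeedback(FeedbackSlot& slot, const Function* target,
                             const std::function<const Function*()>& receiver) {
      if (target == &kApply) {
        // Record the applied function, not apply itself; the receiver thunk
        // is only invoked on this path (mirroring Lazy<JSAny>).
        slot.recorded = receiver();
        slot.is_receiver = true;
      } else {
        slot.recorded = target;
        slot.is_receiver = false;
      }
    }

    int main() {
      Function f{"f"};
      FeedbackSlot slot;
      CollectCallFeedback(slot, &kApply, [&] { return &f; });
      std::printf("%s (receiver=%d)\n", slot.recorded->name, slot.is_receiver);
    }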
diff --git a/chromium/v8/src/builtins/ic.tq b/chromium/v8/src/builtins/ic.tq
index 49d4e78fa55..a9e92cf63ec 100644
--- a/chromium/v8/src/builtins/ic.tq
+++ b/chromium/v8/src/builtins/ic.tq
@@ -8,10 +8,10 @@ namespace ic {
@export
macro CollectCallFeedback(
- maybeTarget: JSAny, context: Context,
+ maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
callable::CollectCallFeedback(
- maybeTarget, context, maybeFeedbackVector, slotId);
+ maybeTarget, maybeReceiver, context, maybeFeedbackVector, slotId);
}
@export
@@ -51,10 +51,15 @@ macro IsUninitialized(feedback: MaybeObject): bool {
}
extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject;
+extern macro LoadFeedbackVectorSlot(
+ FeedbackVector, uintptr, constexpr int32): MaybeObject;
extern operator '[]' macro LoadFeedbackVectorSlot(
FeedbackVector, intptr): MaybeObject;
extern macro StoreFeedbackVectorSlot(
FeedbackVector, uintptr, MaybeObject): void;
+extern macro StoreFeedbackVectorSlot(
+ FeedbackVector, uintptr, MaybeObject, constexpr WriteBarrierMode,
+ constexpr int32): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
diff --git a/chromium/v8/src/builtins/iterator.tq b/chromium/v8/src/builtins/iterator.tq
index 05993ea6d77..150e3d2cb57 100644
--- a/chromium/v8/src/builtins/iterator.tq
+++ b/chromium/v8/src/builtins/iterator.tq
@@ -78,8 +78,8 @@ extern macro LoadContextFromBaseline(): Context;
extern macro LoadFeedbackVectorFromBaseline(): FeedbackVector;
transitioning builtin GetIteratorBaseline(
- context: Context, receiver: JSAny, loadSlot: TaggedIndex,
- callSlot: TaggedIndex): JSAny {
+ receiver: JSAny, loadSlot: TaggedIndex, callSlot: TaggedIndex): JSAny {
+ const context: Context = LoadContextFromBaseline();
const feedback: FeedbackVector = LoadFeedbackVectorFromBaseline();
const iteratorMethod: JSAny =
LoadIC(context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
@@ -97,12 +97,18 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny):
return CreateAsyncFromSyncIterator(context, syncIterator);
}
+macro GetLazyReceiver(receiver: JSAny): JSAny {
+ return receiver;
+}
+
transitioning builtin CallIteratorWithFeedback(
context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
feedback: Undefined|FeedbackVector): JSAny {
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
- ic::CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
+ ic::CollectCallFeedback(
+ iteratorMethod, %MakeLazy<JSAny, JSAny>('GetLazyReceiver', receiver),
+ context, feedback, callSlotUnTagged);
const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
otherwise ThrowCalledNonCallable(iteratorMethod);
return Call(context, iteratorCallable, receiver);
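%MakeLazy packages the receiver as a thunk and GetLazyReceiver is its trivial body, so the common non-apply path through CollectCallFeedback never pays to materialize the receiver; RunLazy simply invokes the thunk when the apply case needs it. The same deferral in plain C++:

    #include <cstdio>
    #include <functional>

    using Lazy = std::function<int()>;

    // MakeLazy defers the computation; RunLazy is just invocation.
    Lazy MakeLazy(int receiver) {
      return [receiver] {
        std::printf("materializing receiver\n");  // only on the slow path
        return receiver;
      };
    }

    int RunLazy(const Lazy& thunk) { return thunk(); }

    int main() {
      Lazy r = MakeLazy(42);
      bool needs_receiver = true;  // e.g. target == Function.prototype.apply
      if (needs_receiver) std::printf("got %d\n", RunLazy(r));
    }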
diff --git a/chromium/v8/src/builtins/mips/builtins-mips.cc b/chromium/v8/src/builtins/mips/builtins-mips.cc
index 1d8e80bdf87..6ff2ed4b5c3 100644
--- a/chromium/v8/src/builtins/mips/builtins-mips.cc
+++ b/chromium/v8/src/builtins/mips/builtins-mips.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -226,7 +227,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(s0);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- v0: constructor result
@@ -637,7 +638,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -761,8 +762,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1406,7 +1407,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1453,7 +1454,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1756,7 +1757,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- t0 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2005,7 +2006,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2364,6 +2365,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2376,10 +2383,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
} else {
@@ -2391,7 +2398,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
@@ -2440,12 +2447,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2698,7 +2705,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ sw(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
diff --git a/chromium/v8/src/builtins/mips64/builtins-mips64.cc b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
index c029188f146..9d0156e9278 100644
--- a/chromium/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -227,7 +228,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(a6);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- v0: constructor result
@@ -324,7 +325,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -777,8 +778,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1424,7 +1425,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1471,7 +1472,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1815,7 +1816,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- a4 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2073,7 +2074,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2395,7 +2396,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
__ Branch(&push_doubles, le, a1, Operand(zero_reg));
// Save vector registers.
- __ MultiPushMSA(fp_regs);
+ {
+ CpuFeatureScope msa_scope(
+ masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
+ __ MultiPushMSA(fp_regs);
+ }
__ Branch(&simd_pushed);
__ bind(&push_doubles);
__ MultiPushFPU(fp_regs);
@@ -2419,7 +2424,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
__ Branch(&pop_doubles, le, a1, Operand(zero_reg));
// Pop vector registers.
- __ MultiPopMSA(fp_regs);
+ {
+ CpuFeatureScope msa_scope(
+ masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
+ __ MultiPopMSA(fp_regs);
+ }
__ Branch(&simd_popped);
__ bind(&pop_doubles);
__ Daddu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
@@ -2456,6 +2465,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2468,10 +2483,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
} else {
@@ -2483,7 +2498,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
@@ -2532,12 +2547,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2794,7 +2809,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
diff --git a/chromium/v8/src/builtins/ppc/builtins-ppc.cc b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
index bc467c9ff9f..35d817d3a26 100644
--- a/chromium/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -113,13 +114,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r6: new target
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// Restore context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -229,8 +230,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
// Restore constructor function and argument count.
- __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r3);
Label stack_overflow;
@@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// ----------- S t a t e -------------
@@ -275,12 +276,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
+ __ LoadU64(r3, MemOperand(sp));
__ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_and_return);
// Restore smi-tagged arguments count from the frame.
- __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
@@ -305,13 +306,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&do_throw);
// Restore the context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
__ bkpt(0);
__ bind(&stack_overflow);
// Restore the context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
@@ -347,7 +348,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -373,7 +374,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_suspended_generator_address(masm->isolate());
__ Move(scratch, debug_suspended_generator);
- __ LoadP(scratch, MemOperand(scratch));
+ __ LoadU64(scratch, MemOperand(scratch));
__ cmp(scratch, r4);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -402,19 +403,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mr(r9, r6);
-
__ bind(&loop);
- __ subi(r9, r9, Operand(1));
- __ cmpi(r9, Operand::Zero());
+ __ subi(r6, r6, Operand(1));
+ __ cmpi(r6, Operand::Zero());
__ blt(&done_loop);
- __ ShiftLeftImm(r10, r9, Operand(kTaggedSizeLog2));
+ __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -554,7 +552,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Save copies of the top frame descriptor on the stack.
__ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
- __ LoadP(r0, MemOperand(r3));
+ __ LoadU64(r0, MemOperand(r3));
__ push(r0);
// Clear c_entry_fp, now we've pushed its previous value to the stack.
@@ -574,7 +572,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r3, js_entry_sp);
- __ LoadP(scratch, MemOperand(r3));
+ __ LoadU64(scratch, MemOperand(r3));
__ cmpi(scratch, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreP(fp, MemOperand(r3));
@@ -663,7 +661,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPop(kCalleeSaved);
// Return
- __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
+ __ LoadU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
@@ -703,7 +701,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ Move(cp, context_address);
- __ LoadP(cp, MemOperand(cp));
+ __ LoadU64(cp, MemOperand(cp));
// Push the function.
__ Push(r5);
@@ -734,7 +732,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mtctr(r7);
__ bind(&loop);
__ LoadPU(r9, MemOperand(r8, -kSystemPointerSize)); // read next parameter
- __ LoadP(r0, MemOperand(r9)); // dereference handle
+ __ LoadU64(r0, MemOperand(r9)); // dereference handle
__ push(r0); // push parameter
__ bdnz(&loop);
__ bind(&done);
@@ -800,23 +798,23 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ mr(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
Register params_size = scratch1;
// Get the size of the formal parameters + receiver (in bytes).
- __ LoadP(params_size,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lwz(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
- __ LoadP(actual_params_size,
- MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ LoadU64(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftImm(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
@@ -869,7 +867,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ LoadWordArith(
+ __ LoadS32(
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
@@ -1082,9 +1080,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimization_state = r7;
// Read off the optimization state in the feedback vector.
- __ LoadWord(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
- r0);
+ __ LoadU32(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
+ r0);
// Check if the optimized code slot is not empty or has a optimization marker.
Label has_optimized_code_or_marker;
@@ -1097,7 +1095,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&not_optimized);
// Increment invocation count for the function.
- __ LoadWord(
+ __ LoadU32(
r8,
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
@@ -1165,10 +1163,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If the bytecode array has a valid incoming new target or generator object
// register, initialize it with incoming value which was passed in r6.
Label no_incoming_new_target_or_generator_register;
- __ LoadWordArith(
- r8, FieldMemOperand(
- kInterpreterBytecodeArrayRegister,
- BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ LoadS32(r8,
+ FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
@@ -1205,10 +1203,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
@@ -1238,8 +1236,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// After the call, restore the bytecode array, bytecode offset and accumulator
// registers again. Also, restore the bytecode offset in the stack to its
// previous value.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1313,7 +1311,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pass the spread in the register r3.
// r2 already points to the penultimate argument, the spread
// lies in the next interpreter register.
- __ LoadP(r5, MemOperand(r5, -kSystemPointerSize));
+ __ LoadU64(r5, MemOperand(r5, -kSystemPointerSize));
}
// Call the target.
@@ -1364,7 +1362,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// r4 already points to the penultimate argument, the spread
// lies in the next interpreter register.
__ subi(r7, r7, Operand(kSystemPointerSize));
- __ LoadP(r5, MemOperand(r7));
+ __ LoadU64(r5, MemOperand(r7));
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
@@ -1406,7 +1404,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// custom copy of the interpreter entry trampoline for profiling. If so,
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
@@ -1425,7 +1423,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Move(r5, ExternalReference::
address_of_interpreter_entry_trampoline_instruction_start(
masm->isolate()));
- __ LoadP(r5, MemOperand(r5));
+ __ LoadU64(r5, MemOperand(r5));
__ bind(&trampoline_loaded);
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value()));
@@ -1437,8 +1435,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1453,8 +1451,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
if (FLAG_debug_code) {
@@ -1478,12 +1476,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
Label enter_bytecode, function_entry_bytecode;
@@ -1524,7 +1522,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1567,7 +1565,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ subi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
- __ LoadP(
+ __ LoadU64(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -1609,7 +1607,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
- __ LoadP(r3, MemOperand(sp, 0 * kSystemPointerSize));
+ __ LoadU64(r3, MemOperand(sp, 0 * kSystemPointerSize));
__ addi(sp, sp, Operand(1 * kSystemPointerSize));
__ Ret();
}
@@ -1677,13 +1675,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mr(r5, r8);
Label done;
- __ LoadP(r4, MemOperand(sp)); // receiver
+ __ LoadU64(r4, MemOperand(sp)); // receiver
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
@@ -1762,13 +1760,13 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label done;
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ cmpi(r3, Operand(3));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
@@ -1812,14 +1810,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ mr(r7, r4);
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
__ mr(r6, r4);
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ cmpi(r3, Operand(3));
__ blt(&done);
- __ LoadP(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
+ __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, r0);
@@ -1847,6 +1845,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -1859,7 +1858,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register scratch = ip;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
@@ -1963,7 +1962,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
Label stack_done, stack_overflow;
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
@@ -2125,7 +2124,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadHalfWord(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r4, no_reg, r5, r3, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2511,6 +2510,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2523,12 +2527,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r5: pointer to the first argument
__ mr(r15, r4);
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mr(r4, r5);
} else {
@@ -2552,7 +2556,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
__ EnterExitFrame(
- save_doubles, arg_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
@@ -2584,8 +2588,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
- __ LoadP(r4, MemOperand(r3, kSystemPointerSize));
- __ LoadP(r3, MemOperand(r3));
+ __ LoadU64(r4, MemOperand(r3, kSystemPointerSize));
+ __ LoadU64(r3, MemOperand(r3));
}
// Check result for exception sentinel.
@@ -2601,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ Move(r6, pending_exception_address);
- __ LoadP(r6, MemOperand(r6));
+ __ LoadU64(r6, MemOperand(r6));
__ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
@@ -2613,12 +2617,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ blr();
// Handling of exception.
@@ -2653,11 +2657,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Retrieve the handler context, SP and FP.
__ Move(cp, pending_handler_context_address);
- __ LoadP(cp, MemOperand(cp));
+ __ LoadU64(cp, MemOperand(cp));
__ Move(sp, pending_handler_sp_address);
- __ LoadP(sp, MemOperand(sp));
+ __ LoadU64(sp, MemOperand(sp));
__ Move(fp, pending_handler_fp_address);
- __ LoadP(fp, MemOperand(fp));
+ __ LoadU64(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
@@ -2685,10 +2689,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ Move(ip, pending_handler_entrypoint_address);
- __ LoadP(ip, MemOperand(ip));
+ __ LoadU64(ip, MemOperand(ip));
if (FLAG_enable_embedded_constant_pool) {
__ Move(kConstantPoolRegister, pending_handler_constant_pool_address);
- __ LoadP(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
+ __ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
}
__ Jump(ip);
}
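The kArgvInRegister → ArgvMode::kRegister, save_doubles, and InvokeType changes in this file all follow one migration pattern: loose enums become scoped enum classes, and any boolean a callee needs is derived explicitly at the call site. A hedged sketch of the pattern (enumerator names match the diff; the declarations here are illustrative, not V8's actual headers):

enum class ArgvMode { kStack, kRegister };
enum class SaveFPRegsMode { kIgnore, kSave };
enum class InvokeType { kCall, kJump };

// Call sites compare against the scoped value and pass explicit booleans:
//   if (argv_mode == ArgvMode::kRegister) { /* argv already in a register */ }
//   EnterExitFrame(save_doubles == SaveFPRegsMode::kSave, ...);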
@@ -2873,8 +2877,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// r15 - next_address->kLimitOffset
// r16 - next_address->kLevelOffset
__ Move(r17, next_address);
- __ LoadP(r14, MemOperand(r17, kNextOffset));
- __ LoadP(r15, MemOperand(r17, kLimitOffset));
+ __ LoadU64(r14, MemOperand(r17, kNextOffset));
+ __ LoadU64(r15, MemOperand(r17, kLimitOffset));
__ lwz(r16, MemOperand(r17, kLevelOffset));
__ addi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
@@ -2887,19 +2891,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label return_value_loaded;
// load value from ReturnValue
- __ LoadP(r3, return_value_operand);
+ __ LoadU64(r3, return_value_operand);
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ StoreP(r14, MemOperand(r17, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ lwz(r4, MemOperand(r17, kLevelOffset));
__ cmp(r4, r16);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ subi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
- __ LoadP(r0, MemOperand(r17, kLimitOffset));
+ __ LoadU64(r0, MemOperand(r17, kLimitOffset));
__ cmp(r15, r0);
__ bne(&delete_allocated_handles);
@@ -2907,7 +2911,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&leave_exit_frame);
// LeaveExitFrame expects unwind space to be in a register.
if (stack_space_operand != nullptr) {
- __ LoadP(r14, *stack_space_operand);
+ __ LoadU64(r14, *stack_space_operand);
} else {
__ mov(r14, Operand(stack_space));
}
@@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r14, RootIndex::kTheHoleValue);
__ Move(r15, ExternalReference::scheduled_exception_address(isolate));
- __ LoadP(r15, MemOperand(r15));
+ __ LoadU64(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
@@ -3151,8 +3155,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ LoadP(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+ __ LoadU64(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
@@ -3174,13 +3178,14 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux use a function descriptor;
- __ LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(temp2, kSystemPointerSize));
- __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address
+ __ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(temp2, kSystemPointerSize));
+ __ LoadU64(temp2, MemOperand(temp2, 0)); // Instruction address
}
__ Call(temp2); // Call the C++ function.
- __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+ __ LoadU64(r0,
+ MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
@@ -3230,9 +3235,6 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
- // Get the bailout id is passed as r29 by the caller.
- __ mr(r5, r29);
-
__ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
@@ -3246,9 +3248,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ PrepareCallCFunction(6, r8);
__ li(r3, Operand::Zero());
Label context_check;
- __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ LoadU64(r4,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r4, &context_check);
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
@@ -3263,14 +3266,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Preserve "deoptimizer" object in register r3 and get the input
// frame descriptor pointer to r4 (deoptimizer->input_);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
+ __ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r5, MemOperand(r4, offset));
}
@@ -3302,7 +3305,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ add(r5, r5, sp);
// Unwind the stack down to - but not including - the unwinding
@@ -3331,28 +3334,29 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
- __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+ __ LoadU64(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
- __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
+ __ LoadU64(r7,
+ MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
__ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r5 = current FrameDescription*, r6 = loop index.
- __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
- __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+ __ LoadU64(r5, MemOperand(r7, 0)); // output_[ix]
+ __ LoadU64(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header);
__ bind(&inner_push_loop);
__ addi(r6, r6, Operand(-sizeof(intptr_t)));
__ add(r9, r5, r6);
- __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+ __ LoadU64(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
__ push(r9);
__ bind(&inner_loop_header);
@@ -3364,7 +3368,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ cmp(r7, r4);
__ blt(&outer_push_loop);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
@@ -3373,9 +3377,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
// Push pc, and continuation from the last output frame.
- __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+ __ LoadU64(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
- __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+ __ LoadU64(r9, MemOperand(r5, FrameDescription::continuation_offset()));
__ push(r9);
// Restore the registers from the last output frame.
@@ -3388,7 +3392,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ __ LoadU64(ToRegister(i), MemOperand(scratch, offset));
}
}
}
@@ -3465,11 +3469,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
Register handler_arg =
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
- __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadP(
+ __ LoadU64(handler_arg,
+ MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ LoadU64(
slot_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadP(
+ __ LoadU64(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
diff --git a/chromium/v8/src/builtins/promise-abstract-operations.tq b/chromium/v8/src/builtins/promise-abstract-operations.tq
index b7a1b571e64..0e435afad9b 100644
--- a/chromium/v8/src/builtins/promise-abstract-operations.tq
+++ b/chromium/v8/src/builtins/promise-abstract-operations.tq
@@ -196,6 +196,8 @@ FulfillPromise(implicit context: Context)(
// Assert: The value of promise.[[PromiseState]] is "pending".
assert(promise.Status() == PromiseState::kPending);
+ RunContextPromiseHookResolve(promise);
+
// 2. Let reactions be promise.[[PromiseFulfillReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
@@ -214,17 +216,24 @@ FulfillPromise(implicit context: Context)(
}
extern macro PromiseBuiltinsAssembler::
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
+
+extern macro PromiseBuiltinsAssembler::
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(uint32):
+ bool;
// https://tc39.es/ecma262/#sec-rejectpromise
transitioning builtin
RejectPromise(implicit context: Context)(
promise: JSPromise, reason: JSAny, debugEvent: Boolean): JSAny {
+ const promiseHookFlags = PromiseHookFlags();
+
// If promise hook is enabled or the debugger is active, let
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags) ||
!promise.HasHandler()) {
// 7. If promise.[[PromiseIsHandled]] is false, perform
// HostPromiseRejectionTracker(promise, "reject").
@@ -233,6 +242,8 @@ RejectPromise(implicit context: Context)(
return runtime::RejectPromise(promise, reason, debugEvent);
}
+ RunContextPromiseHookResolve(promise, promiseHookFlags);
+
// 2. Let reactions be promise.[[PromiseRejectReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
diff --git a/chromium/v8/src/builtins/promise-all.tq b/chromium/v8/src/builtins/promise-all.tq
index 41dee8b9e76..5ab64a167d3 100644
--- a/chromium/v8/src/builtins/promise-all.tq
+++ b/chromium/v8/src/builtins/promise-all.tq
@@ -231,8 +231,7 @@ Reject(Object) {
// the PromiseReaction (aka we can pass undefined to
// PerformPromiseThen), since this is only necessary for DevTools and
// PromiseHooks.
- if (promiseResolveFunction != Undefined ||
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (promiseResolveFunction != Undefined || NeedsAnyPromiseHooks() ||
IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
!IsPromiseThenLookupChainIntact(
nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
diff --git a/chromium/v8/src/builtins/promise-constructor.tq b/chromium/v8/src/builtins/promise-constructor.tq
index 3c5a5e560d4..b5f7292a77c 100644
--- a/chromium/v8/src/builtins/promise-constructor.tq
+++ b/chromium/v8/src/builtins/promise-constructor.tq
@@ -40,7 +40,8 @@ extern macro ConstructorBuiltinsAssembler::FastNewObject(
Context, JSFunction, JSReceiver): JSObject;
extern macro
-PromiseBuiltinsAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate(): bool;
+PromiseBuiltinsAssembler::IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
+ uint32): bool;
// https://tc39.es/ecma262/#sec-promise-executor
transitioning javascript builtin
@@ -73,9 +74,7 @@ PromiseConstructor(
result = UnsafeCast<JSPromise>(
FastNewObject(context, promiseFun, UnsafeCast<JSReceiver>(newTarget)));
PromiseInit(result);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(result, Undefined);
- }
+ RunAnyPromiseHookInit(result, Undefined);
}
const isDebugActive = IsDebugActive();
diff --git a/chromium/v8/src/builtins/promise-jobs.tq b/chromium/v8/src/builtins/promise-jobs.tq
index 80e98f373b9..77d2e7cf9c4 100644
--- a/chromium/v8/src/builtins/promise-jobs.tq
+++ b/chromium/v8/src/builtins/promise-jobs.tq
@@ -7,6 +7,7 @@
// https://tc39.es/ecma262/#sec-promise-jobs
namespace promise {
extern macro IsJSPromiseMap(Map): bool;
+extern macro NeedsAnyPromiseHooks(): bool;
// https://tc39.es/ecma262/#sec-promiseresolvethenablejob
transitioning builtin
@@ -25,7 +26,7 @@ PromiseResolveThenableJob(implicit context: Context)(
const promiseThen = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
const thenableMap = thenable.map;
if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) &&
- !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() &&
+ !NeedsAnyPromiseHooks() &&
IsPromiseSpeciesLookupChainIntact(nativeContext, thenableMap)) {
// We know that the {thenable} is a JSPromise, which doesn't require
// any special treatment and that {then} corresponds to the initial
diff --git a/chromium/v8/src/builtins/promise-misc.tq b/chromium/v8/src/builtins/promise-misc.tq
index 67e5e38687d..0eae717b3fc 100644
--- a/chromium/v8/src/builtins/promise-misc.tq
+++ b/chromium/v8/src/builtins/promise-misc.tq
@@ -8,6 +8,9 @@
namespace runtime {
extern transitioning runtime
AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
+
+extern transitioning runtime
+ReportMessageFromMicrotask(implicit context: Context)(JSAny): JSAny;
}
// Unsafe functions that should be used very carefully.
@@ -17,6 +20,12 @@ extern macro PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(JSPromise): void;
extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
}
+extern macro
+PromiseBuiltinsAssembler::IsContextPromiseHookEnabled(uint32): bool;
+
+extern macro
+PromiseBuiltinsAssembler::PromiseHookFlags(): uint32;
+
namespace promise {
extern macro IsFunctionWithPrototypeSlotMap(Map): bool;
@@ -90,6 +99,109 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
};
}
+@export
+transitioning macro RunContextPromiseHookInit(implicit context: Context)(
+ promise: JSPromise, parent: Object) {
+ const maybeHook = *NativeContextSlot(
+ ContextSlot::PROMISE_HOOK_INIT_FUNCTION_INDEX);
+ const hook = Cast<Callable>(maybeHook) otherwise return;
+ const parentObject = Is<JSPromise>(parent) ? Cast<JSPromise>(parent)
+ otherwise unreachable: Undefined;
+
+ try {
+ Call(context, hook, Undefined, promise, parentObject);
+ } catch (e) {
+ runtime::ReportMessageFromMicrotask(e);
+ }
+}
+
+@export
+transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
+ promise: JSPromise) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
+ promise: JSPromise, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise, flags);
+}
+
+@export
+transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
+ flags);
+}
+
+@export
+transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
+ flags);
+}
+
+transitioning macro RunContextPromiseHook(implicit context: Context)(
+ slot: Slot<NativeContext, Undefined|Callable>,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ if (!IsContextPromiseHookEnabled(flags)) return;
+ const maybeHook = *NativeContextSlot(slot);
+ const hook = Cast<Callable>(maybeHook) otherwise return;
+
+ let promise: JSPromise;
+ typeswitch (promiseOrCapability) {
+ case (jspromise: JSPromise): {
+ promise = jspromise;
+ }
+ case (capability: PromiseCapability): {
+ promise = Cast<JSPromise>(capability.promise) otherwise return;
+ }
+ case (Undefined): {
+ return;
+ }
+ }
+
+ try {
+ Call(context, hook, Undefined, promise);
+ } catch (e) {
+ runtime::ReportMessageFromMicrotask(e);
+ }
+}
+
+transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
+ promise: JSPromise, parent: Object) {
+ const promiseHookFlags = PromiseHookFlags();
+ // Fast return if no hooks are set.
+ if (promiseHookFlags == 0) return;
+ if (IsContextPromiseHookEnabled(promiseHookFlags)) {
+ RunContextPromiseHookInit(promise, parent);
+ }
+ if (IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(promiseHookFlags)) {
+ runtime::PromiseHookInit(promise, parent);
+ }
+}
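RunAnyPromiseHookInit reads PromiseHookFlags() once and dispatches on individual bits, so the common no-hooks case costs a single compare. A minimal C++ sketch of that dispatch (bit names and positions are assumptions for illustration, not V8's real flag layout):

#include <cstdint>

constexpr uint32_t kContextPromiseHookBit = 1u << 0;     // assumed bit
constexpr uint32_t kIsolateHookOrDelegateBit = 1u << 1;  // assumed bit

void RunAnyPromiseHookInitSketch(uint32_t flags) {
  if (flags == 0) return;  // fast path: no hooks installed
  if (flags & kContextPromiseHookBit) {
    // run the per-context init hook (the Torque path above)
  }
  if (flags & kIsolateHookOrDelegateBit) {
    // fall back to the isolate-level runtime init hook
  }
}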
+
// These allocate and initialize a promise with pending state and
// undefined fields.
//
@@ -100,9 +212,7 @@ transitioning macro NewJSPromise(implicit context: Context)(parent: Object):
JSPromise {
const instance = InnerNewJSPromise();
PromiseInit(instance);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, parent);
- }
+ RunAnyPromiseHookInit(instance, parent);
return instance;
}
@@ -124,10 +234,7 @@ transitioning macro NewJSPromise(implicit context: Context)(
instance.reactions_or_result = result;
instance.SetStatus(status);
promise_internal::ZeroOutEmbedderOffsets(instance);
-
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, Undefined);
- }
+ RunAnyPromiseHookInit(instance, Undefined);
return instance;
}
diff --git a/chromium/v8/src/builtins/promise-resolve.tq b/chromium/v8/src/builtins/promise-resolve.tq
index e933dfbae0a..fa3d19411fc 100644
--- a/chromium/v8/src/builtins/promise-resolve.tq
+++ b/chromium/v8/src/builtins/promise-resolve.tq
@@ -30,7 +30,8 @@ transitioning builtin
PromiseResolve(implicit context: Context)(
constructor: JSReceiver, value: JSAny): JSAny {
const nativeContext = LoadNativeContext(context);
- const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
+ const promiseFun = *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_FUNCTION_INDEX);
try {
// Check if {value} is a JSPromise.
const value = Cast<JSPromise>(value) otherwise NeedToAllocate;
@@ -40,7 +41,8 @@ PromiseResolve(implicit context: Context)(
// intact, as that guards the lookup path for "constructor" on
// JSPromise instances which have the (initial) Promise.prototype.
const promisePrototype =
- *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
if (value.map.prototype != promisePrototype) {
@@ -97,7 +99,7 @@ ResolvePromise(implicit context: Context)(
// We also let the runtime handle it if promise == resolution.
// We can use pointer comparison here, since the {promise} is guaranteed
// to be a JSPromise inside this function and thus is reference comparable.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
TaggedEqual(promise, resolution))
deferred {
return runtime::ResolvePromise(promise, resolution);
@@ -139,7 +141,8 @@ ResolvePromise(implicit context: Context)(
assert(IsJSReceiverMap(resolutionMap));
assert(!IsPromiseThenProtectorCellInvalid());
if (resolutionMap ==
- *NativeContextSlot(ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
+ *NativeContextSlot(
+ nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
return FulfillPromise(promise, resolution);
} else {
goto Slow;
@@ -147,10 +150,11 @@ ResolvePromise(implicit context: Context)(
}
const promisePrototype =
- *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
if (resolutionMap.prototype == promisePrototype) {
// The {resolution} is a native Promise in this case.
- then = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
+ then = *NativeContextSlot(nativeContext, ContextSlot::PROMISE_THEN_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
goto Enqueue;
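The recurring change in this file threads the hoisted nativeContext into every NativeContextSlot read, so Torque's load elimination can fold the repeated context loads into one (which the static_assert lines then verify). A rough C++ analogue of the before/after (types and helpers are illustrative stand-ins, not V8's):

struct NativeContext { void* slots[16]; };

// Assumed accessors, stand-ins for the Torque intrinsics.
inline NativeContext* LoadNativeContext(void* context) {
  return static_cast<NativeContext*>(context);  // placeholder derivation
}
inline void* Slot(NativeContext* nc, int index) { return nc->slots[index]; }

inline void Example(void* context) {
  NativeContext* nc = LoadNativeContext(context);  // loaded once, reused
  void* promise_then = Slot(nc, /*PROMISE_THEN_INDEX*/ 0);
  void* promise_proto = Slot(nc, /*PROMISE_PROTOTYPE_INDEX*/ 1);
  (void)promise_then; (void)promise_proto;
  // Before the change, each slot read re-derived the native context from the
  // implicit context, hiding the common subexpression from the optimizer.
}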
diff --git a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
index 04907f5268a..afd9a1fca1c 100644
--- a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -100,7 +101,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -225,7 +226,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(a6);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- a0: constructor result
@@ -300,12 +301,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -325,7 +330,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ Sd(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -388,12 +393,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, a3, a0);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -763,8 +770,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Move(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -964,6 +971,184 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ __ RecordComment("[ Check optimization state");
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ __ RecordComment("]");
+}
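LoadOptimizationStateAndJumpIfNeedsProcessing reduces to one masked test of the feedback vector's flags word. A standalone sketch (the mask name is from the diff; its value here is a placeholder, not V8's real constant):

#include <cstdint>

bool HasOptimizedCodeOrMarker(uint32_t optimization_state) {
  // Stand-in for FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask.
  constexpr uint32_t kMask = 0x7u;  // placeholder value
  return (optimization_state & kMask) != 0;  // branch taken when any bit set
}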
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+  // Check if an optimization marker is available.
+ __ And(
+ t0, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+ __ Ld(optimization_marker,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = temps.Acquire();
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+
+ // Increment invocation count for the function.
+ {
+ Register invocation_count = t0;
+ __ Lw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add32(invocation_count, invocation_count, Operand(1));
+ __ Sw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ __ RecordComment("[ Frame Setup");
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+  // Normally the first thing we'd do here is Push(ra, fp), but we already
+  // entered the frame in BaselineCompiler::Prologue, as we had to use the
+  // value of ra before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+  // We'll use the bytecode array both for resetting code age/OSR arming and
+  // for pushing onto the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+  // are adjacent 8-bit fields, so we can optimize by writing both with a
+  // single 16-bit store. These static asserts guard that this assumption is
+  // valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sh(zero_reg,
+ FieldMemOperand(bytecodeArray, BytecodeArray::kOsrNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+  // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ RecordComment("]");
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ Register frame_size = t0;
+ __ Ld(frame_size,
+ FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
+ Register sp_minus_frame_size = frame_size;
+ __ Sub64(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = t1;
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ __ RecordComment("]");
+ }
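The comment in that block leans on an invariant worth spelling out: the interrupt stack limit is always at or above the real stack limit, so one unsigned comparison against the interrupt limit conservatively covers both checks. A sketch of the folded test (plain C++, registers replaced by values):

#include <cstdint>

// interrupt_limit >= real_limit, so passing this check clears both limits.
bool NeedsStackGuard(uintptr_t sp, uintptr_t frame_size,
                     uintptr_t interrupt_limit) {
  return sp - frame_size < interrupt_limit;  // the Uless branch above
}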
+
+ // Do "fast" return to the caller pc in lr.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ __ RecordComment("[ Optimized marker check");
+ // Drop the frame created by the baseline call.
+ __ Pop(fp, ra);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ Register new_target = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
+
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ RecordComment("[ Stack/interrupt call");
+ // Save incoming new target or generator
+ __ Push(zero_reg, new_target);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ Pop(new_target, zero_reg);
+ __ RecordComment("]");
+ }
+ __ Ret();
+ temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -989,8 +1174,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
- kScratchReg);
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1188,6 +1374,44 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lh(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector.
+ // TODO(v8:11429): Is this worth doing here? Baseline code will check it
+ // anyway...
+ __ Ld(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ __ And(
+ t0, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ // Load the baseline code into the closure.
+ __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
+ __ JumpCodeObject(a2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1407,7 +1631,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1454,7 +1678,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1503,12 +1727,12 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
// address from the builtins table.
- __ Pop(t0);
+ __ Pop(t6);
__ Add64(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ LoadEntryFromBuiltinIndex(t0);
- __ Jump(t0);
+ __ LoadEntryFromBuiltinIndex(t6);
+ __ Jump(t6);
}
} // namespace
@@ -1542,7 +1766,20 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, t0);
+}
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(int64_t(0))) {
+ __ Add64(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1550,11 +1787,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
__ Ret(eq, a0, Operand(Smi::zero()));
-
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
-
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1568,9 +1805,18 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Add64(a0, a0, a1);
- __ Add64(ra, a0, Code::kHeaderSize - kHeapObjectTag);
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
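Generate_OSREntry "returns" into the new code by materializing the target in ra and executing Ret, and its callers compute that target from the code object plus the OSR offset taken from the deopt data. A sketch of the address math (constant names as in the diff; illustration only, not the emitted code):

#include <cstdint>

// <entry> = <code_obj> + <osr_offset> + header size - heap-object tag.
uintptr_t OsrEntryAddress(uintptr_t code_obj, intptr_t osr_offset,
                          intptr_t header_size, intptr_t heap_object_tag) {
  return code_obj + osr_offset + header_size - heap_object_tag;
}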
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
}
// static
@@ -1808,7 +2054,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- a4 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2070,7 +2316,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2438,10 +2684,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ Move(s1, a2);
} else {
@@ -2453,7 +2699,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s3: number of arguments including receiver (C callee-saved)
@@ -2502,12 +2748,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// a0:a1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s3: still holds argc (callee-saved).
: s3;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2689,6 +2935,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
namespace {
int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -2762,7 +3012,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s3, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
@@ -3228,9 +3478,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
}
- __ pop(t3); // Get continuation, leave pc on stack.
+ __ pop(t6); // Get continuation, leave pc on stack.
__ pop(ra);
- __ Jump(t3);
+ __ Jump(t6);
__ stop();
}
@@ -3252,6 +3502,146 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the start or the end of the current bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ Push(zero_reg, kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Replace BytecodeOffset with the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Branch(&install_baseline_code, eq, t0, Operand(FEEDBACK_VECTOR_TYPE));
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ Sd(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ UseScratchRegisterScope temps(masm);
+ Register code_obj = temps.Acquire();
+ __ Ld(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ __ Push(zero_reg, kInterpreterAccumulatorRegister);
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Sub64(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Add64(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of the first bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
diff --git a/chromium/v8/src/builtins/s390/builtins-s390.cc b/chromium/v8/src/builtins/s390/builtins-s390.cc
index 7711af6e901..0272621ac01 100644
--- a/chromium/v8/src/builtins/s390/builtins-s390.cc
+++ b/chromium/v8/src/builtins/s390/builtins-s390.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -110,7 +111,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r3: constructor function
// r5: new target
- __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
// Restore context from the frame.
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -238,7 +239,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r8);
// Call the function.
- __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
// ----------- S t a t e -------------
// -- r0: constructor result
@@ -339,7 +340,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -395,18 +396,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(r8, r5);
-
__ bind(&loop);
- __ SubS64(r8, r8, Operand(1));
+ __ SubS64(r5, r5, Operand(1));
__ blt(&done_loop);
- __ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -857,8 +855,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ mov(scratch1,
optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1527,7 +1525,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1573,7 +1571,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1890,6 +1888,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -1902,7 +1901,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register scratch = ip;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
@@ -2177,7 +2176,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r3, no_reg, r4, r2, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2549,6 +2548,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2561,12 +2565,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r4: pointer to the first argument
__ mov(r7, r3);
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(r3, r4);
} else {
@@ -2594,7 +2598,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
#endif
__ EnterExitFrame(
- save_doubles, arg_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc, argv in callee-saved registers for later.
@@ -2657,12 +2661,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r2:r3: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// r6: still holds argc (callee-saved).
: r6;
- __ LeaveExitFrame(save_doubles, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ b(r14);
// Handling of exception.
@@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ StoreU64(r6, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ LoadU32(r3, MemOperand(r9, kLevelOffset));
__ CmpS64(r3, r8);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
@@ -3464,11 +3468,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
Register handler_arg =
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
- __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadP(
+ __ LoadU64(handler_arg,
+ MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ LoadU64(
slot_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadP(
+ __ LoadU64(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
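The LoadP-to-LoadU64 renames continue the effort to make memory access widths explicit in the PPC/s390 macro assemblers: a "pointer-sized" load hides its width, a U64 load states it. The idea restated as plain C++ (memcpy-based loads; nothing V8-specific):

#include <cstdint>
#include <cstring>
#include <iostream>

// Named-width load helper in the spirit of LoadU64: reads exactly 8 bytes.
uint64_t LoadU64(const void* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}

int main() {
  uint64_t slot = 0x1122334455667788ull;
  std::cout << std::hex << LoadU64(&slot) << "\n";
}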
diff --git a/chromium/v8/src/builtins/setup-builtins-internal.cc b/chromium/v8/src/builtins/setup-builtins-internal.cc
index 348866c9bde..fbcfab56f43 100644
--- a/chromium/v8/src/builtins/setup-builtins-internal.cc
+++ b/chromium/v8/src/builtins/setup-builtins-internal.cc
@@ -11,7 +11,6 @@
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
-#include "src/heap/heap-inl.h" // For Heap::code_range.
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
@@ -42,10 +41,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
- const base::AddressRegion& code_range = isolate->heap()->code_range();
+ const base::AddressRegion& code_region = isolate->heap()->code_region();
bool pc_relative_calls_fit_in_code_range =
- !code_range.is_empty() &&
- std::ceil(static_cast<float>(code_range.size() / MB)) <=
+ !code_region.is_empty() &&
+ std::ceil(static_cast<float>(code_region.size() / MB)) <=
kMaxPCRelativeCodeRangeInMB;
options.isolate_independent_code = true;
@@ -219,7 +218,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
// static
void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
- // Replace references from all code objects to placeholders.
+ // Replace references from all builtin code objects to placeholders.
Builtins* builtins = isolate->builtins();
DisallowGarbageCollection no_gc;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -228,11 +227,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
- HeapObjectIterator iterator(isolate->heap());
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- if (!obj.IsCode()) continue;
- Code code = Code::cast(obj);
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = builtins->builtin(i);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
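The rewritten loop no longer scans the entire heap for Code objects; during isolate setup only builtins exist as code, so iterating the builtins table directly is equivalent and much cheaper. A toy restatement of the new shape (types are stand-ins, not V8's):

#include <array>
#include <iostream>

struct Code { int placeholder_refs = 0; };

struct Builtins {
  static constexpr int builtin_count = 3;
  std::array<Code, builtin_count> table{};
  Code& builtin(int i) { return table[i]; }
};

int main() {
  Builtins builtins;
  // Old shape: walk every heap object and skip non-Code. New shape:
  for (int i = 0; i < Builtins::builtin_count; i++) {
    Code& code = builtins.builtin(i);
    code.placeholder_refs = 0;  // stand-in for patching reloc entries
  }
  std::cout << "patched " << Builtins::builtin_count << " builtins\n";
}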
diff --git a/chromium/v8/src/builtins/typed-array-createtypedarray.tq b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
index 6e416ddd98f..6333ebf97fd 100644
--- a/chromium/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
@@ -19,11 +19,17 @@ extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
Map, String): never;
+extern runtime GrowableSharedArrayBufferByteLength(implicit context: Context)(
+ Object): JSAny;
+
transitioning macro AllocateTypedArray(implicit context: Context)(
isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
- byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
+ byteOffset: uintptr, byteLength: uintptr, length: uintptr,
+ isLengthTracking: bool): JSTypedArray {
let elements: ByteArray;
if constexpr (isOnHeap) {
+ assert(!IsResizableArrayBuffer(buffer));
+ assert(!isLengthTracking);
elements = AllocateByteArray(byteLength);
} else {
elements = kEmptyByteArray;
@@ -53,6 +59,9 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.byte_offset = byteOffset;
typedArray.byte_length = byteLength;
typedArray.length = length;
+ typedArray.bit_field.is_length_tracking = isLengthTracking;
+ typedArray.bit_field.is_backed_by_rab =
+ IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
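For orientation: is_length_tracking marks views that automatically follow their resizable/growable buffer's current length, and is_backed_by_rab marks views over non-shared resizable buffers. A hedged C++ model of the flag layout (bit positions are illustrative; V8 generates the real accessors from Torque bitfield definitions):

#include <cstdint>
#include <iostream>

struct JSTypedArrayBitField {
  uint32_t bits = 0;
  static constexpr uint32_t kIsLengthTracking = 1u << 0;  // illustrative
  static constexpr uint32_t kIsBackedByRab = 1u << 1;     // bit positions

  void Set(uint32_t mask, bool v) {
    if (v) bits |= mask; else bits &= ~mask;
  }
};

int main() {
  JSTypedArrayBitField field;
  field.Set(JSTypedArrayBitField::kIsLengthTracking, true);
  // Shared growable buffers are excluded, matching the second assignment:
  field.Set(JSTypedArrayBitField::kIsBackedByRab, false);
  std::cout << field.bits << "\n";  // 1
}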
@@ -88,8 +97,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
const buffer = AllocateEmptyOnHeapBuffer(byteLength);
const isOnHeap: constexpr bool = true;
+ const isLengthTracking: constexpr bool = false;
const typedArray = AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+ isOnHeap, map, buffer, byteOffset, byteLength, length,
+ isLengthTracking);
if constexpr (initialize) {
const backingStore = typedArray.data_ptr;
@@ -107,8 +118,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
} label AttachOffHeapBuffer(bufferObj: Object) {
const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
const isOnHeap: constexpr bool = false;
+ const isLengthTracking: constexpr bool = false;
return AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+ isOnHeap, map, buffer, byteOffset, byteLength, length,
+ isLengthTracking);
}
}
@@ -204,8 +217,26 @@ transitioning macro ConstructByTypedArray(implicit context: Context)(
// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
// ES #sec-typedarray-buffer-byteoffset-length
transitioning macro ConstructByArrayBuffer(implicit context: Context)(
- map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny,
- elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
+ target: JSFunction, newTarget: JSReceiver, buffer: JSArrayBuffer,
+ byteOffset: JSAny, length: JSAny): JSTypedArray {
+ let map: Map;
+ const isLengthTracking: bool =
+ IsResizableArrayBuffer(buffer) && (length == Undefined);
+ // Pick the RAB / GSAB map (containing the corresponding RAB / GSAB
+ // ElementsKind). GSAB-backed non-length-tracking TypedArrays behave just like
+ // normal TypedArrays, so exclude them.
+ const rabGsab: bool = IsResizableArrayBuffer(buffer) &&
+ (!IsSharedArrayBuffer(buffer) || isLengthTracking);
+ if (rabGsab) {
+ map = GetDerivedRabGsabMap(target, newTarget);
+ } else {
+ map = GetDerivedMap(target, newTarget);
+ }
+
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
+
try {
// 6. Let offset be ? ToIndex(byteOffset).
const offset: uintptr = ToIndex(byteOffset) otherwise IfInvalidOffset;
@@ -226,7 +257,13 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
}
// 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
- const bufferByteLength: uintptr = buffer.byte_length;
+ let bufferByteLength: uintptr;
+ if (IsResizableArrayBuffer(buffer) && IsSharedArrayBuffer(buffer)) {
+ bufferByteLength = ToIndex(GrowableSharedArrayBufferByteLength(buffer))
+ otherwise unreachable;
+ } else {
+ bufferByteLength = buffer.byte_length;
+ }
// 11. If length is either not present or undefined, then
if (length == Undefined) {
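The new branch exists because a growable SharedArrayBuffer can be grown concurrently by another thread, so its current byte length cannot come from a plain cached field; the Torque code therefore goes through a runtime function. A sketch of the underlying idea, assuming an atomically published length (all names here are toy ones):

#include <atomic>
#include <cstddef>
#include <iostream>

struct GrowableSharedBackingStore {
  std::atomic<std::size_t> byte_length{16};
  std::size_t max_byte_length = 1024;
};

std::size_t GrowableSharedArrayBufferByteLength(
    const GrowableSharedBackingStore& store) {
  // Acquire pairs with the grower's release, so the new pages are visible.
  return store.byte_length.load(std::memory_order_acquire);
}

int main() {
  GrowableSharedBackingStore store;
  store.byte_length.store(64, std::memory_order_release);  // simulated grow
  std::cout << GrowableSharedArrayBufferByteLength(store) << "\n";  // 64
}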
@@ -261,7 +298,8 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
const isOnHeap: constexpr bool = false;
return AllocateTypedArray(
- isOnHeap, map, buffer, offset, newByteLength, newLength);
+ isOnHeap, map, buffer, offset, newByteLength, newLength,
+ isLengthTracking);
} label IfInvalidAlignment(problemString: String) deferred {
ThrowInvalidTypedArrayAlignment(map, problemString);
} label IfInvalidLength deferred {
@@ -286,6 +324,8 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(
// ValidateTypedArray currently returns the array, not the ViewBuffer.
const newTypedArray: JSTypedArray =
ValidateTypedArray(context, newTypedArrayObj, methodName);
+ newTypedArray.bit_field.is_length_tracking = false;
+ newTypedArray.bit_field.is_backed_by_rab = false;
if (IsDetachedBuffer(newTypedArray.buffer)) deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, methodName);
@@ -336,21 +376,16 @@ transitioning builtin CreateTypedArray(
assert(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
- const map = GetDerivedMap(target, newTarget);
-
- // 5. Let elementSize be the Number value of the Element Size value in Table
- // 56 for constructorName.
- const elementsInfo = GetTypedArrayElementsInfo(map);
-
try {
typeswitch (arg1) {
case (length: Smi): {
goto IfConstructByLength(length);
}
case (buffer: JSArrayBuffer): {
- return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
+ return ConstructByArrayBuffer(target, newTarget, buffer, arg2, arg3);
}
case (typedArray: JSTypedArray): {
+ // TODO(v8:11111): Support RAB / GSAB.
ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
}
case (obj: JSReceiver): {
@@ -363,9 +398,18 @@ transitioning builtin CreateTypedArray(
}
}
} label IfConstructByLength(length: JSAny) {
+ const map = GetDerivedMap(target, newTarget);
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
+
return ConstructByLength(map, length, elementsInfo);
} label IfConstructByArrayLike(
arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) {
+ const map = GetDerivedMap(target, newTarget);
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
return ConstructByArrayLike(
map, arrayLike, length, elementsInfo, bufferConstructor);
}
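To summarize the new control flow in ConstructByArrayBuffer: a view is length-tracking when the buffer is resizable and no explicit length was passed, and the special RAB/GSAB map is needed for all RAB-backed views but only for length-tracking GSAB-backed ones. A standalone restatement of that predicate (names are illustrative):

#include <iostream>

struct Buffer {
  bool resizable;
  bool shared;
};

bool NeedsRabGsabMap(const Buffer& b, bool length_given) {
  const bool is_length_tracking = b.resizable && !length_given;
  // GSAB-backed, non-length-tracking views behave like ordinary typed
  // arrays, so only length-tracking GSAB views (and every RAB view) get
  // the special map.
  return b.resizable && (!b.shared || is_length_tracking);
}

int main() {
  std::cout << NeedsRabGsabMap({true, true}, true) << "\n";   // 0: plain GSAB view
  std::cout << NeedsRabGsabMap({true, true}, false) << "\n";  // 1: tracking GSAB
  std::cout << NeedsRabGsabMap({true, false}, true) << "\n";  // 1: RAB view
}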
diff --git a/chromium/v8/src/builtins/wasm.tq b/chromium/v8/src/builtins/wasm.tq
index 05a15162040..f859d1e0bf2 100644
--- a/chromium/v8/src/builtins/wasm.tq
+++ b/chromium/v8/src/builtins/wasm.tq
@@ -287,8 +287,9 @@ builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
}
builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
- const instanceSize: intptr =
- unsafe::TimesTaggedSize(Convert<intptr>(rtt.instance_size_in_words));
+ const typeInfo: WasmTypeInfo = %RawDownCast<WasmTypeInfo>(
+ rtt.constructor_or_back_pointer_or_native_context);
+ const instanceSize: intptr = SmiUntag(typeInfo.instance_size);
const result: HeapObject = unsafe::Allocate(
instanceSize, AllocationFlag::kAllowLargeObjectAllocation);
*UnsafeConstCast(&result.map) = rtt;
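The wasm.tq change moves the per-type instance size off the Map itself and into a WasmTypeInfo side object reached through the map's constructor/back-pointer slot. A toy C++ model of the new lookup (field names are stand-ins; V8 stores the size as a Smi):

#include <cstdint>
#include <iostream>

struct WasmTypeInfo {
  intptr_t instance_size;  // untagged here; V8 keeps a Smi and untags it
};

struct Map {
  WasmTypeInfo* type_info;  // stand-in for constructor_or_back_pointer
};

intptr_t InstanceSize(const Map& rtt) {
  return rtt.type_info->instance_size;
}

int main() {
  WasmTypeInfo info{48};
  Map rtt{&info};
  std::cout << InstanceSize(rtt) << "\n";  // 48
}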
diff --git a/chromium/v8/src/builtins/x64/builtins-x64.cc b/chromium/v8/src/builtins/x64/builtins-x64.cc
index 5b5e964ef95..7fc7c5dec78 100644
--- a/chromium/v8/src/builtins/x64/builtins-x64.cc
+++ b/chromium/v8/src/builtins/x64/builtins-x64.cc
@@ -8,18 +8,19 @@
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
-#include "src/common/globals.h"
-#include "src/objects/code.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/x64/assembler-x64.h"
+#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
+#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// rax: number of arguments (untagged)
// rdi: constructor function
// rdx: new target
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
// Restore smi-tagged arguments count from the frame.
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
@@ -242,7 +243,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r8);
// Call the function.
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
// ----------- S t a t e -------------
// -- rax constructor result
@@ -383,8 +384,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Initialize the pointer cage base register.
- // TODO(syg): Actually make a cage.
- __ movq(kPointerCageBaseRegister, arg_reg_1);
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
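Context for the hunk above: with a shared pointer-compression cage, compressed heap references are 32-bit offsets that are decompressed by adding a cage base, and that base is now loaded from isolate data rather than taken from the first C++ argument. A toy model of the decompression itself (addresses are illustrative):

#include <cstdint>
#include <iostream>

// Full pointer = cage base + 32-bit compressed offset.
uint64_t Decompress(uint64_t cage_base, uint32_t tagged) {
  return cage_base + tagged;
}

int main() {
  const uint64_t cage_base = 0x400000000000;  // illustrative base
  const uint32_t compressed = 0x00001234;     // illustrative offset
  std::cout << std::hex << Decompress(cage_base, compressed) << "\n";
}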
@@ -560,7 +561,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rdx : new_target
// Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
+ __ Move(rsi, 0);
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -687,9 +688,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
- Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -740,20 +741,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
- {
- Label done_loop, loop;
- __ movq(r9, rcx);
-
- __ bind(&loop);
- __ decq(r9);
- __ j(less, &done_loop, Label::kNear);
- __ PushTaggedAnyField(
- FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch1);
- __ jmp(&loop);
-
- __ bind(&done_loop);
- }
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ decq(rcx);
+ __ j(less, &done_loop, Label::kNear);
+ __ PushTaggedAnyField(
+ FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch1);
+ __ jmp(&loop);
+ __ bind(&done_loop);
// Push the receiver.
__ PushTaggedPointerField(
@@ -841,7 +837,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
optimized_code);
__ movq(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
+ SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1084,7 +1081,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15, jump_mode);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15, jump_mode);
}
// Generate code for entering a JS function with the interpreter.
@@ -1236,10 +1233,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzxbq(kScratchRegister,
+ Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movq(kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11,
+ Operand(kInterpreterDispatchTableRegister, kScratchRegister,
times_system_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
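The dispatch sequence itself is unchanged; only the register holding the zero-extended bytecode moves from r11 to kScratchRegister (this commit frees r11 up as the fixed x64 scratch register). A C-level model of the sequence, with toy types:

#include <cstddef>
#include <cstdint>
#include <iostream>

using Handler = void (*)();

void HandleLdaZero() { std::cout << "LdaZero handler\n"; }

int main() {
  uint8_t bytecode_array[] = {0x23};
  Handler dispatch_table[256] = {};
  dispatch_table[0x23] = HandleLdaZero;

  std::size_t offset = 0;
  uint8_t bytecode = bytecode_array[offset];  // movzxbq equivalent
  dispatch_table[bytecode]();                 // indexed indirect call
}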
@@ -1259,7 +1257,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- r11, &do_return);
+ r8, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1558,15 +1556,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Dispatch to the target bytecode.
- __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzxbq(kScratchRegister,
+ Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movq(kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11,
+ Operand(kInterpreterDispatchTableRegister, kScratchRegister,
times_system_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1587,7 +1586,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- r11, &if_return);
+ r8, &if_return);
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
@@ -1611,29 +1610,38 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ Register feedback_vector = r8;
+ Register optimization_state = rcx;
+ Register return_address = r15;
+
+#ifdef DEBUG
+ for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) {
+ DCHECK(
+ !AreAliased(feedback_vector, optimization_state, return_address, reg));
+ }
+#endif
+
auto descriptor = Builtins::CallInterfaceDescriptorFor(
Builtins::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
- Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
}
// Check for an optimization marker.
- Register optimization_state = rcx;
Label has_optimized_code_or_marker;
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
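The new DEBUG block guards against the hand-picked registers (r8, rcx, r15) colliding with whatever the call descriptor hands out. A minimal analogue of that aliasing check (Register is just an int here; V8's AreAliased compares register codes):

#include <cassert>
#include <initializer_list>

using Register = int;  // toy stand-in

bool AreAliased(Register a, Register b, Register c, Register d) {
  return a == b || a == c || a == d || b == c || b == d || c == d;
}

int main() {
  const Register feedback_vector = 8, optimization_state = 2,
                 return_address = 15;
  for (Register reg : {0, 1, 3}) {  // pretend descriptor registers
    assert(!AreAliased(feedback_vector, optimization_state, return_address,
                       reg));
  }
  return 0;
}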
@@ -1642,8 +1650,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
- Register return_address = r15;
-
__ RecordComment("[ Frame Setup");
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
@@ -1723,8 +1729,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// return since we may do a runtime call along the way that requires the
// stack to only contain valid frames.
__ Drop(1);
- MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, rcx, feedback_vector,
- JumpMode::kPushAndReturn);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ masm, optimization_state, feedback_vector, JumpMode::kPushAndReturn);
__ Trap();
__ RecordComment("]");
}
@@ -1840,7 +1846,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15,
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15,
JumpMode::kJump);
}
@@ -1905,7 +1911,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
- __ Set(rax, 0);
+ __ Move(rax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -2062,6 +2068,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2072,14 +2079,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- rdx : new.target (for [[Construct]])
// -- rsp[0] : return address
// -----------------------------------
- Register scratch = r11;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
- __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ LoadMap(map, rbx);
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
@@ -2101,13 +2107,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// including the receiver and the return address.
{
Label copy, check;
- Register src = r8, dest = rsp, num = r9, current = r11;
+ Register src = r8, dest = rsp, num = r9, current = r12;
__ movq(src, rsp);
__ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
@@ -2123,9 +2129,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Copy the additional arguments onto the stack.
{
- Register value = scratch;
+ Register value = r12;
Register src = rbx, dest = r8, num = rcx, current = r9;
- __ Set(current, 0);
+ __ Move(current, 0);
Label done, push, loop;
__ bind(&loop);
__ cmpl(current, num);
@@ -2166,7 +2172,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadMap(rbx, rdx);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
@@ -2203,13 +2209,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// including the receiver and the return address.
{
Label copy, check;
- Register src = r9, dest = rsp, num = r12, current = r11;
+ Register src = r9, dest = rsp, num = r12, current = r15;
__ movq(src, rsp);
__ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0));
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
@@ -2359,7 +2365,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(rdi, no_reg, rbx, rax, JUMP_FUNCTION);
+ __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
 // The function is a "classConstructor"; we need to raise an exception.
__ bind(&class_constructor);
@@ -2592,7 +2598,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ LoadMap(rcx, rdi);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
@@ -2682,15 +2688,17 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ movq(kContextRegister,
+ MemOperand(rbp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
- __ Pop(r11);
+ __ Pop(r15);
// Convert to Smi for the runtime call.
- __ SmiTag(r11);
+ __ SmiTag(r15);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2717,13 +2725,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
- __ Push(r11);
+ __ Push(r15);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::zero());
__ CallRuntime(Runtime::kWasmCompileLazy, 2);
// The entrypoint address is the return value.
- __ movq(r11, kReturnRegister0);
+ __ movq(r15, kReturnRegister0);
// Restore registers.
for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
@@ -2737,7 +2745,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
}
}
// Finally, jump to the entrypoint.
- __ jmp(r11);
+ __ jmp(r15);
}
void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
@@ -2915,7 +2923,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ LoadExternalPointerField(
signature,
FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag);
+ kForeignForeignAddressTag, kScratchRegister);
foreign_signature = no_reg;
Register return_count = r8;
__ movq(return_count,
@@ -3243,28 +3251,17 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
thread_in_wasm_flag_addr,
MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
__ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
-
- Register jump_table_start = thread_in_wasm_flag_addr;
- __ movq(jump_table_start,
- MemOperand(wasm_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset)));
thread_in_wasm_flag_addr = no_reg;
- Register jump_table_offset = function_data;
- __ LoadAnyTaggedField(
- jump_table_offset,
- MemOperand(
- function_data,
- WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag));
-
- // Change from smi to integer.
- __ SmiUntag(jump_table_offset);
-
- Register function_entry = jump_table_offset;
- __ addq(function_entry, jump_table_start);
- jump_table_offset = no_reg;
- jump_table_start = no_reg;
+ Register function_entry = function_data;
+ Register scratch = r12;
+ __ LoadExternalPointerField(
+ function_entry,
+ FieldOperand(function_data,
+ WasmExportedFunctionData::kForeignAddressOffset),
+ kForeignForeignAddressTag, scratch);
+ function_data = no_reg;
+ scratch = no_reg;
 // We set the indicating value for the GC to the proper one for a Wasm call.
constexpr int kWasmCallGCScanSlotCount = 0;
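The replaced code computed the entrypoint as jump_table_start + jump_table_offset; the new code reads it from an external-pointer field, which under the V8 sandbox is an index into a table of raw off-heap addresses (hence the extra scratch register). A toy model of that indirection, not V8's actual encoding:

#include <cstdint>
#include <iostream>

struct ExternalPointerTable {
  uintptr_t entries[16];
  uintptr_t Get(uint32_t handle) const { return entries[handle]; }
};

int main() {
  ExternalPointerTable table{};
  table.entries[3] = 0xdeadbeef;  // pretend wasm entrypoint address
  uint32_t slot_value = 3;        // value stored in the object's field
  std::cout << std::hex << table.Get(slot_value) << "\n";
}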
@@ -3349,6 +3346,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Param conversion builtins.
// -------------------------------------------
__ bind(&convert_param);
+ // Restore function_data register (which was clobbered by the code above,
+ // but was valid when jumping here earlier).
+ function_data = rdi;
// The order of pushes is important. We want the heap objects, that should be
// scanned by GC, to be on the top of the stack.
// We have to set the indicating value for the GC to the number of values on
@@ -3527,6 +3527,13 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ jmp(&compile_wrapper_done);
}
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
+ __ movq(kScratchRegister, OSRTargetSlot);
+ __ movq(OSRTargetSlot, Immediate(0));
+ __ jmp(kScratchRegister);
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
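Generate_WasmOnStackReplace above is small enough to restate at the C level: the runtime parks the replacement code's address in a dedicated frame slot; the builtin loads it, zeroes the slot so replacement fires at most once, and jumps. A hedged sketch using a function pointer in place of the frame slot:

#include <cstdint>
#include <iostream>

using OsrTarget = void (*)();

void HandleOsr(uintptr_t* osr_target_slot) {
  uintptr_t target = *osr_target_slot;
  *osr_target_slot = 0;  // matches: movq(OSRTargetSlot, Immediate(0))
  if (target) reinterpret_cast<OsrTarget>(target)();
}

void NewCode() { std::cout << "running replacement wasm code\n"; }

int main() {
  uintptr_t slot = reinterpret_cast<uintptr_t>(&NewCode);
  HandleOsr(&slot);  // jumps to NewCode once
  HandleOsr(&slot);  // slot already cleared; nothing happens
}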
@@ -3538,7 +3545,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// rsp: stack pointer (restored after C call)
// rsi: current context (restored)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r15: pointer to the first argument
#ifdef V8_TARGET_OS_WIN
@@ -3569,15 +3576,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
int arg_stack_space =
kArgExtraStackSpace +
(result_size <= kMaxRegisterResultSize ? 0 : result_size);
- if (argv_mode == kArgvInRegister) {
- DCHECK(save_doubles == kDontSaveFPRegs);
+ if (argv_mode == ArgvMode::kRegister) {
+ DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
DCHECK(!builtin_exit_frame);
__ EnterApiExitFrame(arg_stack_space);
// Move argc into r12 (argv is already in r15).
__ movq(r12, rax);
} else {
__ EnterExitFrame(
- arg_stack_space, save_doubles == kSaveFPRegs,
+ arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
@@ -3641,7 +3648,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
+ argv_mode == ArgvMode::kStack);
__ ret(0);
// Handling of exception.
@@ -3866,9 +3874,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register map = rcx;
__ JumpIfSmi(return_value, &ok, Label::kNear);
- __ LoadTaggedPointerField(map,
- FieldOperand(return_value, HeapObject::kMapOffset));
-
+ __ LoadMap(map, return_value);
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
@@ -4053,7 +4059,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = rax;
- Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r15 : no_reg;
DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1));
@@ -4116,7 +4122,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ LoadExternalPointerField(
api_function_address,
FieldOperand(scratch, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag);
+ kForeignForeignAddressTag, kScratchRegister);
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -4172,7 +4178,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
+ Register arg5 = r15;
__ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object
@@ -4192,7 +4198,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movq(arg_reg_1, rax);
- __ Set(arg_reg_2, static_cast<int>(deopt_kind));
+ __ Move(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction