author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/codegen
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
BASELINE: Update Chromium to 85.0.4183.140 (85-based)

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/codegen')
-rw-r--r--  chromium/v8/src/codegen/arm/assembler-arm.cc | 35
-rw-r--r--  chromium/v8/src/codegen/arm/assembler-arm.h | 11
-rw-r--r--  chromium/v8/src/codegen/arm/interface-descriptors-arm.cc | 29
-rw-r--r--  chromium/v8/src/codegen/arm/macro-assembler-arm.cc | 2
-rw-r--r--  chromium/v8/src/codegen/arm64/assembler-arm64.cc | 72
-rw-r--r--  chromium/v8/src/codegen/arm64/assembler-arm64.h | 19
-rw-r--r--  chromium/v8/src/codegen/arm64/constants-arm64.h | 17
-rw-r--r--  chromium/v8/src/codegen/arm64/decoder-arm64-inl.h | 1
-rw-r--r--  chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc | 29
-rw-r--r--  chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 18
-rw-r--r--  chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc | 17
-rw-r--r--  chromium/v8/src/codegen/arm64/macro-assembler-arm64.h | 26
-rw-r--r--  chromium/v8/src/codegen/arm64/register-arm64.h | 8
-rw-r--r--  chromium/v8/src/codegen/assembler.cc | 2
-rw-r--r--  chromium/v8/src/codegen/assembler.h | 19
-rw-r--r--  chromium/v8/src/codegen/code-factory.cc | 17
-rw-r--r--  chromium/v8/src/codegen/code-factory.h | 1
-rw-r--r--  chromium/v8/src/codegen/code-stub-assembler.cc | 604
-rw-r--r--  chromium/v8/src/codegen/code-stub-assembler.h | 350
-rw-r--r--  chromium/v8/src/codegen/compiler.cc | 21
-rw-r--r--  chromium/v8/src/codegen/cpu-features.h | 5
-rw-r--r--  chromium/v8/src/codegen/external-reference.cc | 19
-rw-r--r--  chromium/v8/src/codegen/external-reference.h | 13
-rw-r--r--  chromium/v8/src/codegen/ia32/assembler-ia32.cc | 56
-rw-r--r--  chromium/v8/src/codegen/ia32/assembler-ia32.h | 10
-rw-r--r--  chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc | 30
-rw-r--r--  chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc | 26
-rw-r--r--  chromium/v8/src/codegen/ia32/macro-assembler-ia32.h | 34
-rw-r--r--  chromium/v8/src/codegen/ia32/sse-instr.h | 1
-rw-r--r--  chromium/v8/src/codegen/interface-descriptors.cc | 32
-rw-r--r--  chromium/v8/src/codegen/interface-descriptors.h | 427
-rw-r--r--  chromium/v8/src/codegen/machine-type.h | 40
-rw-r--r--  chromium/v8/src/codegen/mips/assembler-mips.cc | 16
-rw-r--r--  chromium/v8/src/codegen/mips/assembler-mips.h | 36
-rw-r--r--  chromium/v8/src/codegen/mips/interface-descriptors-mips.cc | 46
-rw-r--r--  chromium/v8/src/codegen/mips/macro-assembler-mips.cc | 4
-rw-r--r--  chromium/v8/src/codegen/mips64/assembler-mips64.cc | 15
-rw-r--r--  chromium/v8/src/codegen/mips64/assembler-mips64.h | 36
-rw-r--r--  chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc | 46
-rw-r--r--  chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc | 3
-rw-r--r--  chromium/v8/src/codegen/optimized-compilation-info.cc | 72
-rw-r--r--  chromium/v8/src/codegen/optimized-compilation-info.h | 189
-rw-r--r--  chromium/v8/src/codegen/ppc/assembler-ppc.cc | 16
-rw-r--r--  chromium/v8/src/codegen/ppc/assembler-ppc.h | 62
-rw-r--r--  chromium/v8/src/codegen/ppc/constants-ppc.h | 231
-rw-r--r--  chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc | 29
-rw-r--r--  chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc | 2
-rw-r--r--  chromium/v8/src/codegen/register.h | 10
-rw-r--r--  chromium/v8/src/codegen/reloc-info.h | 9
-rw-r--r--  chromium/v8/src/codegen/s390/constants-s390.h | 11
-rw-r--r--  chromium/v8/src/codegen/s390/interface-descriptors-s390.cc | 29
-rw-r--r--  chromium/v8/src/codegen/s390/macro-assembler-s390.cc | 4
-rw-r--r--  chromium/v8/src/codegen/safepoint-table.cc | 2
-rw-r--r--  chromium/v8/src/codegen/source-position-table.cc | 18
-rw-r--r--  chromium/v8/src/codegen/source-position-table.h | 6
-rw-r--r--  chromium/v8/src/codegen/x64/assembler-x64.cc | 217
-rw-r--r--  chromium/v8/src/codegen/x64/assembler-x64.h | 110
-rw-r--r--  chromium/v8/src/codegen/x64/interface-descriptors-x64.cc | 49
-rw-r--r--  chromium/v8/src/codegen/x64/macro-assembler-x64.cc | 4
-rw-r--r--  chromium/v8/src/codegen/x64/macro-assembler-x64.h | 9
-rw-r--r--  chromium/v8/src/codegen/x64/sse-instr.h | 1
61 files changed, 1804 insertions, 1469 deletions
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.cc b/chromium/v8/src/codegen/arm/assembler-arm.cc
index 9032714f574..343cc5f2ded 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/assembler-arm.cc
@@ -3892,7 +3892,7 @@ void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
-enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
+enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF, VRINTM, VRINTP, VRINTZ };
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
int dst_code, int src_code) {
@@ -3920,6 +3920,15 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
DCHECK_EQ(Neon32, size);
op_encoding = B16 | B10 | 0x7 * B7;
break;
+ case VRINTM:
+ op_encoding = B17 | 0xD * B7;
+ break;
+ case VRINTP:
+ op_encoding = B17 | 0xF * B7;
+ break;
+ case VRINTZ:
+ op_encoding = B17 | 0xB * B7;
+ break;
default:
UNREACHABLE();
}
@@ -4575,6 +4584,30 @@ void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
}
+void Assembler::vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards -Infinity.
+ // See ARM DDI 0487F.b, F6-5493.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTM, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards +Infinity.
+ // See ARM DDI 0487F.b, F6-5501.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTP, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
+void Assembler::vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ // SIMD vector round floating-point to integer towards Zero.
+ // See ARM DDI 0487F.b, F6-5511.
+ DCHECK(IsEnabled(ARMv8));
+ emit(EncodeNeonUnaryOp(VRINTZ, NEON_Q, NeonSize(dt), dst.code(), src.code()));
+}
+
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
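
For illustration, a minimal sketch of how a code generator might emit the new
NEON rounding instructions above (not part of the patch; "masm", the register
choices, and the use of NeonS32 to select 32-bit lanes are assumptions):

    // Hypothetical emitter code; vrint* requires ARMv8 support.
    if (CpuFeatures::IsSupported(ARMv8)) {
      CpuFeatureScope scope(&masm, ARMv8);
      masm.vrintm(NeonS32, q0, q1);  // per-lane round toward -Infinity (floor)
      masm.vrintp(NeonS32, q0, q1);  // per-lane round toward +Infinity (ceil)
      masm.vrintz(NeonS32, q0, q1);  // per-lane round toward zero (trunc)
    }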
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.h b/chromium/v8/src/codegen/arm/assembler-arm.h
index 61205760df0..d344b53dbf2 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm.h
@@ -820,7 +820,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- // ARMv8 rounding instructions.
+ // ARMv8 rounding instructions (Scalar).
void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
@@ -908,6 +908,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
DwVfpRegister src2);
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
+
+ // ARMv8 rounding instructions (NEON).
+ void vrintm(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrintz(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src);
+
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
QwNeonRegister shift);
diff --git a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc b/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
index 5a4e08dc77c..09b80af2d49 100644
--- a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ b/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
index 7e5fa8cef1c..7b9e73e1d91 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.cc b/chromium/v8/src/codegen/arm64/assembler-arm64.cc
index 97a57d6f3c6..2e21ab913d7 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64.cc
@@ -41,19 +41,66 @@
namespace v8 {
namespace internal {
+namespace {
+
+#ifdef USE_SIMULATOR
+static unsigned SimulatorFeaturesFromCommandLine() {
+ if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) {
+ return 0;
+ }
+ if (strcmp(FLAG_sim_arm64_optional_features, "all") == 0) {
+ return (1u << NUMBER_OF_CPU_FEATURES) - 1;
+ }
+ fprintf(
+ stderr,
+ "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n",
+ FLAG_sim_arm64_optional_features);
+ fprintf(stderr,
+ "Supported values are: none\n"
+ " all\n");
+ FATAL("sim-arm64-optional-features");
+}
+#endif // USE_SIMULATOR
+
+static constexpr unsigned CpuFeaturesFromCompiler() {
+ unsigned features = 0;
+#if defined(__ARM_FEATURE_JCVT)
+ features |= 1u << JSCVT;
+#endif
+ return features;
+}
+
+} // namespace
+
// -----------------------------------------------------------------------------
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
- // AArch64 has no configuration options, no further probing is required.
- supported_ = 0;
-
// Only use statically determined features for cross compile (snapshot).
- if (cross_compile) return;
+ if (cross_compile) {
+ supported_ |= CpuFeaturesFromCompiler();
+ return;
+ }
// We used to probe for coherent cache support, but on older CPUs it
// causes crashes (crbug.com/524337), and newer CPUs don't even have
// the feature any more.
+
+#ifdef USE_SIMULATOR
+ supported_ |= SimulatorFeaturesFromCommandLine();
+#else
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ unsigned runtime = 0;
+ if (cpu.has_jscvt()) {
+ runtime |= 1u << JSCVT;
+ }
+
+ // Use the best of the features found by CPU detection and those inferred from
+ // the build system.
+ supported_ |= CpuFeaturesFromCompiler();
+ supported_ |= runtime;
+#endif // USE_SIMULATOR
}
void CpuFeatures::PrintTarget() {}
@@ -1115,10 +1162,10 @@ void Assembler::cls(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, CLS);
}
-void Assembler::pacia1716() { Emit(PACIA1716); }
-void Assembler::autia1716() { Emit(AUTIA1716); }
-void Assembler::paciasp() { Emit(PACIASP); }
-void Assembler::autiasp() { Emit(AUTIASP); }
+void Assembler::pacib1716() { Emit(PACIB1716); }
+void Assembler::autib1716() { Emit(AUTIB1716); }
+void Assembler::pacibsp() { Emit(PACIBSP); }
+void Assembler::autibsp() { Emit(AUTIBSP); }
void Assembler::bti(BranchTargetIdentifier id) {
SystemHint op;
@@ -1136,9 +1183,9 @@ void Assembler::bti(BranchTargetIdentifier id) {
op = BTI_jc;
break;
case BranchTargetIdentifier::kNone:
- case BranchTargetIdentifier::kPaciasp:
+ case BranchTargetIdentifier::kPacibsp:
// We always want to generate a BTI instruction here, so disallow
- // skipping its generation or generating a PACIASP instead.
+ // skipping its generation or generating a PACIBSP instead.
UNREACHABLE();
}
hint(op);
@@ -2714,6 +2761,11 @@ void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}
+void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(rd.IsW() && vn.Is1D());
+ Emit(FJCVTZS | Rn(vn) | Rd(rd));
+}
+
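+// FJCVTZS performs the JavaScript ToInt32 conversion in one instruction:
+// truncate toward zero, wrap modulo 2^32, and map NaN and infinities to 0.
+// A self-contained sketch of those semantics in plain C++ (reference only,
+// not V8 code):
+//
+//   #include <cmath>
+//   #include <cstdint>
+//
+//   int32_t JsToInt32(double d) {
+//     if (!std::isfinite(d)) return 0;         // NaN, +/-Inf -> 0
+//     double t = std::trunc(d);                // round toward zero
+//     double m = std::fmod(t, 4294967296.0);   // remainder mod 2^32
+//     if (m < 0) m += 4294967296.0;            // into [0, 2^32)
+//     return static_cast<int32_t>(static_cast<uint32_t>(m));
+//   }
+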
#define NEON_FP2REGMISC_FCVT_LIST(V) \
V(fcvtnu, NEON_FCVTNU, FCVTNU) \
V(fcvtns, NEON_FCVTNS, FCVTNS) \
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.h b/chromium/v8/src/codegen/arm64/assembler-arm64.h
index a9e8a5e85ad..f787bad464f 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64.h
@@ -780,21 +780,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void clz(const Register& rd, const Register& rn);
void cls(const Register& rd, const Register& rn);
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in x17 and modifier in x16 [Armv8.3].
- void pacia1716();
+ void pacib1716();
- // Pointer Authentication Code for Instruction address, using key A, with
+ // Pointer Authentication Code for Instruction address, using key B, with
// address in LR and modifier in SP [Armv8.3].
- void paciasp();
+ void pacibsp();
- // Authenticate Instruction address, using key A, with address in x17 and
+ // Authenticate Instruction address, using key B, with address in x17 and
// modifier in x16 [Armv8.3].
- void autia1716();
+ void autib1716();
- // Authenticate Instruction address, using key A, with address in LR and
+ // Authenticate Instruction address, using key B, with address in LR and
// modifier in SP [Armv8.3].
- void autiasp();
+ void autibsp();
// Memory instructions.
@@ -1750,6 +1750,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// FP convert to signed integer, nearest with ties to even.
void fcvtns(const Register& rd, const VRegister& vn);
+ // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+ void fjcvtzs(const Register& rd, const VRegister& vn);
+
// FP convert to unsigned integer, nearest with ties to even.
void fcvtnu(const Register& rd, const VRegister& vn);
diff --git a/chromium/v8/src/codegen/arm64/constants-arm64.h b/chromium/v8/src/codegen/arm64/constants-arm64.h
index e63962993a7..52790b9faf4 100644
--- a/chromium/v8/src/codegen/arm64/constants-arm64.h
+++ b/chromium/v8/src/codegen/arm64/constants-arm64.h
@@ -412,9 +412,9 @@ enum class BranchTargetIdentifier {
// Emit a "BTI jc" instruction, which is a combination of "BTI j" and "BTI c".
kBtiJumpCall,
- // Emit a PACIASP instruction, which acts like a "BTI c" or a "BTI jc", based
- // on the value of SCTLR_EL1.BT0.
- kPaciasp
+ // Emit a PACIBSP instruction, which acts like a "BTI c" or a "BTI jc",
+ // based on the value of SCTLR_EL1.BT0.
+ kPacibsp
};
enum BarrierDomain {
@@ -793,10 +793,10 @@ enum SystemPAuthOp : uint32_t {
SystemPAuthFixed = 0xD503211F,
SystemPAuthFMask = 0xFFFFFD1F,
SystemPAuthMask = 0xFFFFFFFF,
- PACIA1716 = SystemPAuthFixed | 0x00000100,
- AUTIA1716 = SystemPAuthFixed | 0x00000180,
- PACIASP = SystemPAuthFixed | 0x00000320,
- AUTIASP = SystemPAuthFixed | 0x000003A0
+ PACIB1716 = SystemPAuthFixed | 0x00000140,
+ AUTIB1716 = SystemPAuthFixed | 0x000001C0,
+ PACIBSP = SystemPAuthFixed | 0x00000360,
+ AUTIBSP = SystemPAuthFixed | 0x000003E0
};
// Any load or store (including pair).
@@ -1325,7 +1325,8 @@ enum FPIntegerConvertOp : uint32_t {
FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
- FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000,
+ FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000
};
// Conversion between fixed point and floating point.
diff --git a/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h b/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
index 25d69b38983..1a7d483dea9 100644
--- a/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/decoder-arm64-inl.h
@@ -538,7 +538,6 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
- (instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
diff --git a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index 9f059224449..2c60ea2ec04 100644
--- a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@@ -299,6 +294,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 93b8136d9a9..e638312ed0b 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -318,8 +318,8 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) {
// instructions between the bind and the target identifier instruction.
InstructionAccurateScope scope(this, 1);
bind(label);
- if (id == BranchTargetIdentifier::kPaciasp) {
- paciasp();
+ if (id == BranchTargetIdentifier::kPacibsp) {
+ pacibsp();
} else {
bti(id);
}
@@ -1136,7 +1136,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1153,7 +1153,7 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr)));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1188,7 +1188,7 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1199,7 +1199,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1228,7 +1228,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_IMPLIES((lr_mode == kDontLoadLR), (dst != lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR) {
- Autiasp();
+ Autibsp();
}
#endif
}
@@ -1238,7 +1238,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
- Paciasp();
+ Pacibsp();
}
#endif
@@ -1280,7 +1280,7 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
if (lr_mode == kAuthLR && contains_lr) {
- Autiasp();
+ Autibsp();
}
#endif
}
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
index c157df29966..a591e690c3f 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1197,7 +1197,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
void MacroAssembler::PushCalleeSavedRegisters() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Paciasp();
+ Pacibsp();
#endif
{
@@ -1249,7 +1249,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- Autiasp();
+ Autibsp();
#endif
}
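
A sketch of the sign/authenticate pairing after the key switch (illustrative
only; the prologue/epilogue shape is an assumption, the names come from this
patch):

    // With V8_ENABLE_CONTROL_FLOW_INTEGRITY, the kSignLR push emits
    // Pacibsp() before storing lr, and the kAuthLR pop emits Autibsp()
    // after reloading it, so a corrupted return address faults at Ret().
    __ Push<TurboAssembler::kSignLR>(lr, fp);
    // ... frame body ...
    __ Pop<TurboAssembler::kAuthLR>(fp, lr);
    __ Ret();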
@@ -1971,7 +1971,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
Adr(x17, &return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, sp, kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Poke(x17, 0);
@@ -2263,6 +2263,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
DoubleRegister double_input,
StubCallMode stub_mode,
LinkRegisterStatus lr_status) {
+ if (CpuFeatures::IsSupported(JSCVT)) {
+ Fjcvtzs(result.W(), double_input);
+ return;
+ }
+
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@@ -2650,7 +2655,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
@@ -3243,7 +3248,7 @@ void TurboAssembler::RestoreFPAndLR() {
// We can load the return address directly into x17.
Add(x16, fp, StandardFrameConstants::kCallerSPOffset);
Ldp(fp, x17, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- Autia1716();
+ Autib1716();
Mov(lr, x17);
#else
Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -3256,7 +3261,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
Adr(x17, return_location);
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize);
- Pacia1716();
+ Pacib1716();
#endif
Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
}
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
index 109e73c3c22..0cb9e823198 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -503,13 +503,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
- void Paciasp() {
+ void Pacibsp() {
DCHECK(allow_macro_instructions_);
- paciasp();
+ pacibsp();
}
- void Autiasp() {
+ void Autibsp() {
DCHECK(allow_macro_instructions_);
- autiasp();
+ autibsp();
}
// The 1716 pac and aut instructions encourage people to use x16 and x17
@@ -519,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Register temp = temps.AcquireX(); // temp will be x16
// __ Mov(x17, ptr);
// __ Mov(x16, modifier); // Will override temp!
- // __ Pacia1716();
+ // __ Pacib1716();
//
// To work around this issue, you must exclude x16 and x17 from the scratch
// register list. You may need to replace them with other registers:
@@ -529,18 +529,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// temps.Include(x10, x11);
// __ Mov(x17, ptr);
// __ Mov(x16, modifier);
- // __ Pacia1716();
- void Pacia1716() {
+ // __ Pacib1716();
+ void Pacib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- pacia1716();
+ pacib1716();
}
- void Autia1716() {
+ void Autib1716() {
DCHECK(allow_macro_instructions_);
DCHECK(!TmpList()->IncludesAliasOf(x16));
DCHECK(!TmpList()->IncludesAliasOf(x17));
- autia1716();
+ autib1716();
}
inline void Dmb(BarrierDomain domain, BarrierType type);
@@ -1009,6 +1009,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
fcvtzs(vd, vn, fbits);
}
+ void Fjcvtzs(const Register& rd, const VRegister& vn) {
+ DCHECK(allow_macro_instructions());
+ DCHECK(!rd.IsZero());
+ fjcvtzs(rd, vn);
+ }
+
inline void Fcvtzu(const Register& rd, const VRegister& fn);
void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
DCHECK(allow_macro_instructions());
diff --git a/chromium/v8/src/codegen/arm64/register-arm64.h b/chromium/v8/src/codegen/arm64/register-arm64.h
index c98b0f6162f..76bf3049c89 100644
--- a/chromium/v8/src/codegen/arm64/register-arm64.h
+++ b/chromium/v8/src/codegen/arm64/register-arm64.h
@@ -92,9 +92,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValid(code, size, type));
-#endif
+ CONSTEXPR_DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
@@ -304,9 +302,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(IsValidLaneCount(lane_count));
-#endif
+ CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
diff --git a/chromium/v8/src/codegen/assembler.cc b/chromium/v8/src/codegen/assembler.cc
index 3b27bf5db9e..3d0b7d28e47 100644
--- a/chromium/v8/src/codegen/assembler.cc
+++ b/chromium/v8/src/codegen/assembler.cc
@@ -81,7 +81,7 @@ namespace {
class DefaultAssemblerBuffer : public AssemblerBuffer {
public:
explicit DefaultAssemblerBuffer(int size)
- : buffer_(OwnedVector<uint8_t>::New(size)) {
+ : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {
#ifdef DEBUG
ZapCode(reinterpret_cast<Address>(buffer_.start()), size);
#endif
diff --git a/chromium/v8/src/codegen/assembler.h b/chromium/v8/src/codegen/assembler.h
index 1c287222e96..6419e55cec7 100644
--- a/chromium/v8/src/codegen/assembler.h
+++ b/chromium/v8/src/codegen/assembler.h
@@ -78,10 +78,16 @@ class JumpOptimizationInfo {
public:
bool is_collecting() const { return stage_ == kCollection; }
bool is_optimizing() const { return stage_ == kOptimization; }
- void set_optimizing() { stage_ = kOptimization; }
+ void set_optimizing() {
+ DCHECK(is_optimizable());
+ stage_ = kOptimization;
+ }
bool is_optimizable() const { return optimizable_; }
- void set_optimizable() { optimizable_ = true; }
+ void set_optimizable() {
+ DCHECK(is_collecting());
+ optimizable_ = true;
+ }
// Used to verify the instruction sequence is always the same in two stages.
size_t hash_code() const { return hash_code_; }
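
The new DCHECKs pin down the intended two-pass protocol. A sketch (the default
collection stage and the driver loop are assumptions):

    JumpOptimizationInfo info;        // starts in the collection stage
    // First pass: assemble while info.is_collecting(), recording jumps.
    info.set_optimizable();           // only legal during collection
    if (info.is_optimizable()) {
      info.set_optimizing();          // second pass: shrink eligible jumps
      // Reassemble; hash_code() verifies both passes saw the same sequence.
    }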
@@ -251,6 +257,15 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
+ int pc_offset_for_safepoint() {
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+    // Mips needs its own implementation to avoid the trampoline's influence.
+ UNREACHABLE();
+#else
+ return pc_offset();
+#endif
+ }
+
byte* buffer_start() const { return buffer_->start(); }
int buffer_size() const { return buffer_->size(); }
int instruction_size() const { return pc_offset(); }
diff --git a/chromium/v8/src/codegen/code-factory.cc b/chromium/v8/src/codegen/code-factory.cc
index 060a66edc7a..006b6bee167 100644
--- a/chromium/v8/src/codegen/code-factory.cc
+++ b/chromium/v8/src/codegen/code-factory.cc
@@ -268,6 +268,23 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
}
// static
+Callable CodeFactory::Call_WithFeedback(Isolate* isolate,
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Builtins::CallableFor(
+ isolate, Builtins::kCall_ReceiverIsNotNullOrUndefined_WithFeedback);
+ case ConvertReceiverMode::kAny:
+ return Builtins::CallableFor(isolate,
+ Builtins::kCall_ReceiverIsAny_WithFeedback);
+ }
+ UNREACHABLE();
+}
+
+// static
Callable CodeFactory::CallWithArrayLike(Isolate* isolate) {
return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike);
}
diff --git a/chromium/v8/src/codegen/code-factory.h b/chromium/v8/src/codegen/code-factory.h
index b8d294ce714..02fc7e4b236 100644
--- a/chromium/v8/src/codegen/code-factory.h
+++ b/chromium/v8/src/codegen/code-factory.h
@@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable Call_WithFeedback(Isolate* isolate, ConvertReceiverMode mode);
static Callable CallWithArrayLike(Isolate* isolate);
static Callable CallWithSpread(Isolate* isolate);
static Callable CallFunction(
diff --git a/chromium/v8/src/codegen/code-stub-assembler.cc b/chromium/v8/src/codegen/code-stub-assembler.cc
index 901ce0c7b49..6e9b817759d 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.cc
+++ b/chromium/v8/src/codegen/code-stub-assembler.cc
@@ -20,7 +20,6 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
-#include "src/objects/js-aggregate-error.h"
#include "src/objects/js-generator.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -136,17 +135,6 @@ void CodeStubAssembler::Check(SloppyTNode<Word32T> condition_node,
Check(branch, message, file, line, extra_nodes);
}
-template <>
-TNode<Smi> CodeStubAssembler::IntPtrToParameter<Smi>(TNode<IntPtrT> value) {
- return SmiTag(value);
-}
-template <>
-TNode<IntPtrT> CodeStubAssembler::IntPtrToParameter<IntPtrT>(
- TNode<IntPtrT> value) {
- return value;
-}
-
-
void CodeStubAssembler::IncrementCallCount(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) {
Comment("increment call count");
@@ -292,33 +280,6 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<Smi> test) {
- Smi smi_test;
- if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test) {
- int32_t constant_test;
- if (ToInt32Constant(test, &constant_test) && constant_test == 0) {
- return true;
- }
- return false;
-}
-
-bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
- ParameterMode mode) {
- if (mode == INTPTR_PARAMETERS) {
- return IsIntPtrOrSmiConstantZero(UncheckedCast<IntPtrT>(test));
- } else {
- DCHECK_EQ(mode, SMI_PARAMETERS);
- return IsIntPtrOrSmiConstantZero(UncheckedCast<Smi>(test));
- }
- return false;
-}
-
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
int* value,
ParameterMode mode) {
@@ -604,6 +565,16 @@ TNode<BoolT> CodeStubAssembler::IsValidSmiIndex(TNode<Smi> smi) {
return Int32TrueConstant();
}
+template <>
+TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return value;
+}
+
+template <>
+TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) {
+ return SmiUntag(value);
+}
+
TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr(
TNode<TaggedIndex> value) {
return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value),
@@ -2413,10 +2384,10 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
}
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset,
- ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
+ TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, parameter_mode, needs_poisoning);
+ additional_offset, INTPTR_PARAMETERS,
+ LoadSensitivity::kSafe);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
@@ -2893,17 +2864,17 @@ TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Context> context,
}
void CodeStubAssembler::PossiblyGrowElementsCapacity(
- ParameterMode mode, ElementsKind kind, TNode<HeapObject> array,
- Node* length, TVariable<FixedArrayBase>* var_elements, Node* growth,
+ ElementsKind kind, TNode<HeapObject> array, TNode<BInt> length,
+ TVariable<FixedArrayBase>* var_elements, TNode<BInt> growth,
Label* bailout) {
+ ParameterMode mode = OptimalParameterMode();
Label fits(this, var_elements);
- Node* capacity =
- TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
- // length and growth nodes are already in a ParameterMode appropriate
- // representation.
- Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
- GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
- Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+ TNode<BInt> capacity =
+ TaggedToParameter<BInt>(LoadFixedArrayBaseLength(var_elements->value()));
+
+ TNode<BInt> new_length = IntPtrOrSmiAdd(growth, length);
+ GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity), &fits);
+ TNode<BInt> new_capacity = CalculateNewElementsCapacity(new_length);
*var_elements = GrowElementsCapacity(array, var_elements->value(), kind, kind,
capacity, new_capacity, mode, bailout);
Goto(&fits);
@@ -2919,15 +2890,14 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
Label pre_bailout(this);
Label success(this);
TVARIABLE(Smi, var_tagged_length);
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, &pre_bailout);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
@@ -2936,8 +2906,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
args->ForEach(
push_vars,
[&](TNode<Object> arg) {
- TryStoreArrayElement(kind, mode, &pre_bailout, elements,
- var_length.value(), arg);
+ TryStoreArrayElement(kind, &pre_bailout, elements, var_length.value(),
+ arg);
Increment(&var_length);
},
first);
@@ -2950,7 +2920,7 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
BIND(&pre_bailout);
{
- TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
+ TNode<Smi> length = ParameterToTagged(var_length.value());
var_tagged_length = length;
TNode<Smi> diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
@@ -2962,15 +2932,17 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
return var_tagged_length.value();
}
-void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
- ParameterMode mode, Label* bailout,
+void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, Label* bailout,
TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value) {
+ TNode<BInt> index,
+ TNode<Object> value) {
if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
} else if (IsDoubleElementsKind(kind)) {
GotoIfNotNumber(value, bailout);
}
+
+ ParameterMode mode = OptimalParameterMode();
if (IsDoubleElementsKind(kind)) {
StoreElement(elements, kind, index, ChangeNumberToFloat64(CAST(value)),
mode);
@@ -2984,19 +2956,18 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
TNode<Object> value,
Label* bailout) {
Comment("BuildAppendJSArray: ", ElementsKindToString(kind));
- ParameterMode mode = OptimalParameterMode();
TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array)));
TVARIABLE(FixedArrayBase, var_elements, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
- Node* growth = IntPtrOrSmiConstant(1, mode);
- PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
- &var_elements, growth, bailout);
+ TNode<BInt> growth = IntPtrOrSmiConstant<BInt>(1);
+ PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
+ growth, bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
- TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
- var_length.value(), value);
+ TryStoreArrayElement(kind, bailout, var_elements.value(), var_length.value(),
+ value);
Increment(&var_length);
TNode<Smi> length = BIntToSmi(var_length.value());
@@ -3552,12 +3523,13 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3571,8 +3543,9 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags, int array_header_size) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags,
+ int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3608,7 +3581,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
BIND(&nonempty);
{
int base_size = array_header_size;
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
base_size += AllocationMemento::kSize;
}
@@ -3680,7 +3653,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
// Allocate space for the JSArray and the elements FixedArray in one go.
@@ -3691,9 +3665,9 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset,
RootIndex::kEmptyFixedArray);
- if (!allocation_site.is_null()) {
+ if (allocation_site) {
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize),
- allocation_site);
+ *allocation_site);
}
return CAST(array);
@@ -3701,7 +3675,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
- TNode<Smi> length, TNode<AllocationSite> allocation_site,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
AllocationFlags allocation_flags) {
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3728,9 +3702,10 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
return array;
}
-TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
- TNode<Context> context, TNode<JSArray> array, Node* begin, Node* count,
- ParameterMode mode, Node* capacity, TNode<AllocationSite> allocation_site) {
+TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(TNode<Context> context,
+ TNode<JSArray> array,
+ TNode<BInt> begin,
+ TNode<BInt> count) {
TNode<Map> original_array_map = LoadMap(array);
TNode<Int32T> elements_kind = LoadMapElementsKind(original_array_map);
@@ -3739,17 +3714,18 @@ TNode<JSArray> CodeStubAssembler::ExtractFastJSArray(
TNode<Map> array_map = LoadJSArrayElementsMap(elements_kind, native_context);
TNode<FixedArrayBase> new_elements = ExtractFixedArray(
- LoadElements(array), begin, count, capacity,
- ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind);
+ LoadElements(array), begin, count, base::nullopt,
+ ExtractFixedArrayFlag::kAllFixedArrays, nullptr, elements_kind);
TNode<JSArray> result = AllocateJSArray(
- array_map, new_elements, ParameterToTagged(count, mode), allocation_site);
+ array_map, new_elements, ParameterToTagged(count), base::nullopt);
return result;
}
TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site, HoleConversionMode convert_holes) {
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ HoleConversionMode convert_holes) {
// TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this
// function is also used to copy boilerplates even when the no-elements
// protector is invalid. This function should be renamed to reflect its uses.
@@ -3775,7 +3751,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
// Simple extraction that preserves holes.
new_elements =
ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
+ TaggedToParameter<BInt>(CAST(length)), nullptr,
ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode,
nullptr, var_elements_kind.value());
var_new_elements = new_elements;
@@ -3793,7 +3769,7 @@ TNode<JSArray> CodeStubAssembler::CloneFastJSArray(
// ExtractFixedArrayFlag::kDontCopyCOW.
new_elements = ExtractFixedArray(
LoadElements(array), IntPtrOrSmiConstant(0, mode),
- TaggedToParameter(CAST(length), mode), nullptr,
+ TaggedToParameter<BInt>(CAST(length)), nullptr,
ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted);
var_new_elements = new_elements;
// If the array type didn't change, use the original elements kind.
@@ -4284,9 +4260,9 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(
CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode));
ElementsKind kind = PACKED_ELEMENTS;
TNode<Oddball> value = UndefinedConstant();
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
array, kind, from_node, to_node,
- [this, value](Node* array, Node* offset) {
+ [this, value](TNode<HeapObject> array, TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
},
@@ -4312,9 +4288,10 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind,
float_value = LoadHeapNumberValue(CAST(value));
}
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
array, kind, from_node, to_node,
- [this, value, float_value, kind](Node* array, Node* offset) {
+ [this, value, float_value, kind](TNode<HeapObject> array,
+ TNode<IntPtrT> offset) {
if (IsDoubleElementsKind(kind)) {
StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset,
float_value);
@@ -4472,7 +4449,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, begin),
IntPtrConstant(ElementsKindToByteSize(kind)));
- auto loop_body = [&](Node* array, Node* offset) {
+ auto loop_body = [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
Store(array, delta_offset, element);
@@ -4485,17 +4462,15 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
BIND(&iterate_forward);
{
// Make a loop for the stores.
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kForward);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kForward);
Goto(&finished);
}
BIND(&iterate_backward);
{
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
- INTPTR_PARAMETERS,
- ForEachDirection::kReverse);
+ BuildFastArrayForEach(elements, kind, begin, end, loop_body,
+ INTPTR_PARAMETERS, ForEachDirection::kReverse);
Goto(&finished);
}
}
@@ -4563,9 +4538,9 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
const TNode<IntPtrT> delta =
IntPtrMul(IntPtrSub(dst_index, src_index),
IntPtrConstant(ElementsKindToByteSize(kind)));
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
src_elements, kind, begin, end,
- [&](Node* array, Node* offset) {
+ [&](TNode<HeapObject> array, TNode<IntPtrT> offset) {
const TNode<AnyTaggedT> element = Load<AnyTaggedT>(array, offset);
const TNode<WordT> delta_offset = IntPtrAdd(offset, delta);
if (write_barrier == SKIP_WRITE_BARRIER) {
@@ -4757,12 +4732,6 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-TNode<JSAggregateError> CodeStubAssembler::HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail) {
- GotoIfNot(IsJSAggregateError(heap_object), fail);
- return UncheckedCast<JSAggregateError>(heap_object);
-}
-
TNode<FixedArray> CodeStubAssembler::HeapObjectToFixedArray(
TNode<HeapObject> base, Label* cast_fail) {
Label fixed_array(this);
@@ -4795,10 +4764,10 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
Node* start = IntPtrOrSmiConstant(0, mode);
ElementsKind kind = PACKED_ELEMENTS;
- BuildFastFixedArrayForEach(
+ BuildFastArrayForEach(
from_array, kind, start, property_count,
- [this, to_array, needs_write_barrier, destroy_source](Node* array,
- Node* offset) {
+ [this, to_array, needs_write_barrier, destroy_source](
+ TNode<HeapObject> array, TNode<IntPtrT> offset) {
TNode<AnyTaggedT> value = Load<AnyTaggedT>(array, offset);
if (destroy_source == DestroySource::kNo) {
@@ -4829,11 +4798,9 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode<HeapObject> from_array,
Comment("] CopyPropertyArrayValues");
}
-Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
- Node* offset,
- ElementsKind from_kind,
- ElementsKind to_kind,
- Label* if_hole) {
+Node* CodeStubAssembler::LoadElementAndPrepareForStore(
+ TNode<FixedArrayBase> array, TNode<IntPtrT> offset, ElementsKind from_kind,
+ ElementsKind to_kind, Label* if_hole) {
CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
if (IsDoubleElementsKind(from_kind)) {
TNode<Float64T> value =
@@ -4874,28 +4841,32 @@ TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
TNode<Smi> capacity = LoadFixedArrayBaseLength(elements);
- ParameterMode mode = OptimalParameterMode();
- return TryGrowElementsCapacity(
- object, elements, kind, TaggedToParameter(key, mode),
- TaggedToParameter(capacity, mode), mode, bailout);
+ return TryGrowElementsCapacity(object, elements, kind,
+ TaggedToParameter<BInt>(key),
+ TaggedToParameter<BInt>(capacity), bailout);
}
+template <typename TIndex>
TNode<FixedArrayBase> CodeStubAssembler::TryGrowElementsCapacity(
TNode<HeapObject> object, TNode<FixedArrayBase> elements, ElementsKind kind,
- Node* key, Node* capacity, ParameterMode mode, Label* bailout) {
+ TNode<TIndex> key, TNode<TIndex> capacity, Label* bailout) {
+ static_assert(
+ std::is_same<TIndex, Smi>::value || std::is_same<TIndex, IntPtrT>::value,
+ "Only Smi or IntPtrT key and capacity nodes are allowed");
Comment("TryGrowElementsCapacity");
CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode));
- CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode));
// If the gap growth is too big, fall back to the runtime.
- Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
- Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
- GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
+ TNode<TIndex> max_gap = IntPtrOrSmiConstant<TIndex>(JSObject::kMaxGap);
+ TNode<TIndex> max_capacity = IntPtrOrSmiAdd(capacity, max_gap);
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity), bailout);
// Calculate the capacity of the new backing store.
Node* new_capacity = CalculateNewElementsCapacity(
- IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
+ IntPtrOrSmiAdd(key, IntPtrOrSmiConstant<TIndex>(1)));
+
+ ParameterMode mode =
+ std::is_same<TIndex, Smi>::value ? SMI_PARAMETERS : INTPTR_PARAMETERS;
return GrowElementsCapacity(object, elements, kind, kind, capacity,
new_capacity, mode, bailout);
}
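
With the template parameter, key and capacity must share one index type;
mixed Smi/IntPtrT call sites no longer compile. A hypothetical call site
(names are illustrative, inside a CodeStubAssembler context):

    // TIndex is deduced as IntPtrT from both operands; the static_assert
    // above rejects any other instantiation at compile time.
    TNode<FixedArrayBase> grown_elements = TryGrowElementsCapacity(
        object, elements, kind, intptr_key, intptr_capacity, &bailout);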
@@ -6002,10 +5973,6 @@ TNode<BoolT> CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode<Map> map) {
return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map));
}
-TNode<BoolT> CodeStubAssembler::IsJSAggregateError(TNode<HeapObject> object) {
- return HasInstanceType(object, JS_AGGREGATE_ERROR_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSArrayInstanceType(
SloppyTNode<Int32T> instance_type) {
return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
@@ -6144,11 +6111,6 @@ TNode<BoolT> CodeStubAssembler::IsAccessorPair(SloppyTNode<HeapObject> object) {
return IsAccessorPairMap(LoadMap(object));
}
-TNode<BoolT> CodeStubAssembler::IsAllocationSite(
- SloppyTNode<HeapObject> object) {
- return IsAllocationSiteInstanceType(LoadInstanceType(object));
-}
-
TNode<BoolT> CodeStubAssembler::IsHeapNumber(SloppyTNode<HeapObject> object) {
return IsHeapNumberMap(LoadMap(object));
}
@@ -6329,11 +6291,6 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionInstanceType(
return InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsAllocationSiteInstanceType(
- SloppyTNode<Int32T> instance_type) {
- return InstanceTypeEqual(instance_type, ALLOCATION_SITE_TYPE);
-}
-
TNode<BoolT> CodeStubAssembler::IsJSFunction(SloppyTNode<HeapObject> object) {
return IsJSFunctionMap(LoadMap(object));
}
@@ -6844,13 +6801,13 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input,
TNode<Word32T> hash = Word32And(SmiToInt32(smi_input.value()), mask);
TNode<IntPtrT> entry_index =
Signed(ChangeUint32ToWord(Int32Add(hash, hash)));
- TNode<Object> smi_key = UnsafeLoadFixedArrayElement(
- number_string_cache, entry_index, 0, INTPTR_PARAMETERS);
+ TNode<Object> smi_key =
+ UnsafeLoadFixedArrayElement(number_string_cache, entry_index);
GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout);
// Smi match, return value from cache entry.
result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index,
- kTaggedSize, INTPTR_PARAMETERS));
+ kTaggedSize));
Goto(&done);
}
BIND(&done);
@@ -7297,6 +7254,12 @@ TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
[=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
}
+TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
+ TNode<Context> context, TNode<Object> input, OrdinaryToPrimitiveHint hint) {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(isolate(), hint);
+ return CallStub(callable, context, input);
+}
+
TNode<Uint32T> CodeStubAssembler::DecodeWord32(SloppyTNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
@@ -8422,16 +8385,6 @@ void CodeStubAssembler::Lookup(TNode<Name> unique_name, TNode<Array> array,
}
}
-TNode<BoolT> CodeStubAssembler::IsSimpleObjectMap(TNode<Map> map) {
- uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask |
- Map::Bits1::IsAccessCheckNeededBit::kMask;
- // !IsSpecialReceiverType && !IsNamedInterceptor && !IsAccessCheckNeeded
- return Select<BoolT>(
- IsSpecialReceiverInstanceType(LoadMapInstanceType(map)),
- [=] { return Int32FalseConstant(); },
- [=] { return IsClearWord32(LoadMapBitField(map), mask); });
-}
-
void CodeStubAssembler::TryLookupPropertyInSimpleObject(
TNode<JSObject> object, TNode<Map> map, TNode<Name> unique_name,
Label* if_found_fast, Label* if_found_dict,
@@ -10009,9 +9962,8 @@ Node* CodeStubAssembler::CheckForCapacityGrow(
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity);
{
- Node* new_elements =
- TryGrowElementsCapacity(object, elements, kind, key, current_capacity,
- INTPTR_PARAMETERS, &grow_bailout);
+ Node* new_elements = TryGrowElementsCapacity(
+ object, elements, kind, key, current_capacity, &grow_bailout);
checked_elements.Bind(new_elements);
Goto(&fits_capacity);
}
@@ -10316,10 +10268,10 @@ template TNode<UintPtrT> CodeStubAssembler::BuildFastLoop<UintPtrT>(
TNode<UintPtrT> end_index, const FastLoopBody<UintPtrT>& body,
int increment, IndexAdvanceMode advance_mode);
-void CodeStubAssembler::BuildFastFixedArrayForEach(
+void CodeStubAssembler::BuildFastArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
CSA_SLOW_ASSERT(this, MatchesParameterMode(first_element_inclusive, mode));
@@ -10339,14 +10291,14 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
- body(fixed_array, offset);
+ body(CAST(fixed_array), offset);
}
}
return;
@@ -10364,7 +10316,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
BuildFastLoop<IntPtrT>(
vars, start, limit,
- [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
+ [&](TNode<IntPtrT> offset) { body(CAST(fixed_array), offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
@@ -11013,7 +10965,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode<Object> value,
BIND(&if_boolean);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBoolean);
Goto(if_equal);
}
@@ -11095,60 +11047,75 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_smi);
{
Label if_right_smi(this), if_right_not_smi(this);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi);
BIND(&if_right_smi);
{
// We have already checked for {left} and {right} being the same value,
// so when we get here they must be different Smis.
- CombineFeedback(var_type_feedback,
- CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
}
BIND(&if_right_not_smi);
- TNode<Map> right_map = LoadMap(CAST(right));
- Label if_right_heapnumber(this), if_right_boolean(this),
- if_right_bigint(this, Label::kDeferred),
- if_right_receiver(this, Label::kDeferred);
- GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- // {left} is Smi and {right} is not HeapNumber or Smi.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
- TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
- GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
- Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver,
- &if_notequal);
-
- BIND(&if_right_heapnumber);
{
- var_left_float = SmiToFloat64(CAST(left));
- var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
- Goto(&do_float_comparison);
- }
+ TNode<Map> right_map = LoadMap(CAST(right));
+ Label if_right_heapnumber(this), if_right_boolean(this),
+ if_right_oddball(this), if_right_bigint(this, Label::kDeferred),
+ if_right_receiver(this, Label::kDeferred);
+ GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
- BIND(&if_right_boolean);
- {
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
- }
+ // {left} is Smi and {right} is not HeapNumber or Smi.
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
+ GotoIf(IsJSReceiverInstanceType(right_type), &if_right_receiver);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_bigint);
- {
- result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
- NoContextConstant(), right, left));
- Goto(&end);
- }
+ BIND(&if_right_heapnumber);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
+ var_left_float = SmiToFloat64(CAST(left));
+ var_right_float = LoadHeapNumberValue(CAST(right));
+ Goto(&do_float_comparison);
+ }
- BIND(&if_right_receiver);
- {
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
- var_right = CallStub(callable, context, right);
- Goto(&loop);
+ BIND(&if_right_oddball);
+ {
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
+ }
+
+ BIND(&if_right_bigint);
+ {
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
+ NoContextConstant(), right, left));
+ Goto(&end);
+ }
+
+ BIND(&if_right_receiver);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiver);
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_right = CallStub(callable, context, right);
+ Goto(&loop);
+ }
}
}
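// The recurring CombineFeedback/OverwriteFeedback calls in this hunk treat
// CompareOperationFeedback values as bit flags in a lattice: combining is a
// bitwise OR that never discards earlier observations, while overwriting
// replaces the slot outright. Flag values below are illustrative, not V8's
// actual encoding:
//
//   #include <cstdint>
//
//   enum FeedbackModel : uint32_t {
//     kSignedSmallModel = 1 << 0,
//     kNumberModel = 1 << 1,
//     kBooleanModel = 1 << 2,
//     kAnyModel = 0xFFu,
//   };
//
//   void CombineFeedbackModel(uint32_t* slot, uint32_t feedback) {
//     *slot |= feedback;  // lattice join
//   }
//
//   void OverwriteFeedbackModel(uint32_t* slot, uint32_t feedback) {
//     *slot = feedback;  // unconditional replacement
//   }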
@@ -11187,29 +11154,41 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_number);
{
Label if_right_not_number(this);
+
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number);
var_left_float = LoadHeapNumberValue(CAST(left));
var_right_float = LoadHeapNumberValue(CAST(right));
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
Goto(&do_float_comparison);
BIND(&if_right_not_number);
{
- Label if_right_boolean(this);
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ Label if_right_oddball(this);
+
GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber);
- GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
GotoIf(IsBigIntInstanceType(right_type), &use_symmetry);
- Branch(IsJSReceiverInstanceType(right_type), &use_symmetry,
- &if_notequal);
+ GotoIf(IsJSReceiverInstanceType(right_type), &use_symmetry);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
- BIND(&if_right_boolean);
+ BIND(&if_right_oddball);
{
- var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
- Goto(&loop);
+ Label if_right_boolean(this);
+ GotoIf(IsBooleanMap(right_map), &if_right_boolean);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+
+ BIND(&if_right_boolean);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
+ var_right =
+ LoadObjectField(CAST(right), Oddball::kToNumberOffset);
+ Goto(&loop);
+ }
}
}
}
@@ -11218,6 +11197,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
Label if_right_heapnumber(this), if_right_bigint(this),
if_right_string(this), if_right_boolean(this);
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+
GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber);
GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint);
GotoIf(IsStringInstanceType(right_type), &if_right_string);
@@ -11227,9 +11208,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_heapnumber);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber,
NoContextConstant(), left, right));
Goto(&end);
@@ -11237,7 +11216,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_bigint);
{
- CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt);
+ // We already have BigInt feedback.
result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt,
NoContextConstant(), left, right));
Goto(&end);
@@ -11245,9 +11224,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_string);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kString);
result = CAST(CallRuntime(Runtime::kBigIntEqualToString,
NoContextConstant(), left, right));
Goto(&end);
@@ -11255,9 +11232,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_right_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset);
Goto(&loop);
}
@@ -11266,35 +11242,60 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_oddball);
{
Label if_left_boolean(this), if_left_not_boolean(this);
- Branch(IsBooleanMap(left_map), &if_left_boolean, &if_left_not_boolean);
+ GotoIf(IsBooleanMap(left_map), &if_left_boolean);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNullOrUndefined);
+ GotoIf(IsUndetectableMap(left_map), &if_left_not_boolean);
+ }
+ Goto(&if_left_not_boolean);
BIND(&if_left_not_boolean);
{
// {left} is either Null or Undefined. Check if {right} is
// undetectable (which includes Null and Undefined).
- Label if_right_undetectable(this), if_right_not_undetectable(this);
- Branch(IsUndetectableMap(right_map), &if_right_undetectable,
- &if_right_not_undetectable);
+ Label if_right_undetectable(this), if_right_number(this),
+ if_right_oddball(this),
+ if_right_not_number_or_oddball_or_undetectable(this);
+ GotoIf(IsUndetectableMap(right_map), &if_right_undetectable);
+ GotoIf(IsHeapNumberInstanceType(right_type), &if_right_number);
+ GotoIf(IsOddballInstanceType(right_type), &if_right_oddball);
+ Goto(&if_right_not_number_or_oddball_or_undetectable);
BIND(&if_right_undetectable);
{
- if (var_type_feedback != nullptr) {
- // If {right} is undetectable, it must be either also
- // Null or Undefined, or a Receiver (aka document.all).
- *var_type_feedback = SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- }
+ // If {right} is undetectable, it must be either also
+ // Null or Undefined, or a Receiver (aka document.all).
+ CombineFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
Goto(&if_equal);
}
- BIND(&if_right_not_undetectable);
+ BIND(&if_right_number);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_oddball);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kOddball);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_right_not_number_or_oddball_or_undetectable);
{
if (var_type_feedback != nullptr) {
// Track whether {right} is Null, Undefined or Receiver.
- *var_type_feedback = SmiConstant(
+ CombineFeedback(
+ var_type_feedback,
CompareOperationFeedback::kReceiverOrNullOrUndefined);
GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal);
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kAny);
}
Goto(&if_notequal);
}
@@ -11302,9 +11303,8 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&if_left_boolean);
{
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBoolean);
// If {right} is a Boolean too, it must be a different Boolean.
GotoIf(TaggedEqual(right_map, left_map), &if_notequal);
@@ -11387,9 +11387,7 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
{
// {right} is a Primitive, and neither Null or Undefined;
// convert {left} to Primitive too.
- if (var_type_feedback != nullptr) {
- *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny);
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny);
Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
var_left = CallStub(callable, context, left);
Goto(&loop);
@@ -11400,6 +11398,12 @@ TNode<Oddball> CodeStubAssembler::Equal(SloppyTNode<Object> left,
BIND(&do_right_stringtonumber);
{
+ if (var_type_feedback != nullptr) {
+ TNode<Map> right_map = LoadMap(CAST(right));
+ TNode<Uint16T> right_type = LoadMapInstanceType(right_map);
+ CombineFeedback(var_type_feedback,
+ CollectFeedbackForString(right_type));
+ }
var_right = CallBuiltin(Builtins::kStringToNumber, context, right);
Goto(&loop);
}
@@ -11678,15 +11682,47 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
BIND(&if_lhsisoddball);
{
- STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE);
- GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
- GotoIf(Int32LessThan(rhs_instance_type,
- Int32Constant(ODDBALL_TYPE)),
- &if_not_equivalent_types);
- OverwriteFeedback(
- var_type_feedback,
- CompareOperationFeedback::kReceiverOrNullOrUndefined);
- Goto(&if_notequal);
+ Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+ Branch(IsBooleanMap(lhs_map), &if_lhsisboolean,
+ &if_lhsisnotboolean);
+
+ BIND(&if_lhsisboolean);
+ {
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ GotoIf(IsBooleanMap(rhs_map), &if_notequal);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_lhsisnotboolean);
+ {
+ Label if_rhsisheapnumber(this), if_rhsisnotheapnumber(this);
+
+ STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE ==
+ ODDBALL_TYPE);
+ GotoIf(Int32LessThan(rhs_instance_type,
+ Int32Constant(ODDBALL_TYPE)),
+ &if_not_equivalent_types);
+
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber,
+ &if_rhsisnotheapnumber);
+
+ BIND(&if_rhsisheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_not_equivalent_types);
+ }
+
+ BIND(&if_rhsisnotheapnumber);
+ {
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ Goto(&if_notequal);
+ }
+ }
}
BIND(&if_lhsissymbol);
@@ -11742,7 +11778,14 @@ TNode<Oddball> CodeStubAssembler::StrictEqual(
}
BIND(&if_rhsisnotnumber);
- Goto(&if_not_equivalent_types);
+ {
+ TNode<Uint16T> rhs_instance_type = LoadMapInstanceType(rhs_map);
+ GotoIfNot(IsOddballInstanceType(rhs_instance_type),
+ &if_not_equivalent_types);
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumberOrOddball);
+ Goto(&if_notequal);
+ }
}
}
}
@@ -12380,28 +12423,6 @@ TNode<Number> CodeStubAssembler::BitwiseOp(TNode<Word32T> left32,
UNREACHABLE();
}
-// ES #sec-createarrayiterator
-TNode<JSArrayIterator> CodeStubAssembler::CreateArrayIterator(
- TNode<Context> context, TNode<Object> object, IterationKind kind) {
- TNode<NativeContext> native_context = LoadNativeContext(context);
- TNode<Map> iterator_map = CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX));
- TNode<HeapObject> iterator = Allocate(JSArrayIterator::kHeaderSize);
- StoreMapNoWriteBarrier(iterator, iterator_map);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectOffset, object);
- StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
- SmiConstant(0));
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kKindOffset,
- SmiConstant(Smi::FromInt(static_cast<int>(kind))));
- return CAST(iterator);
-}
-
TNode<JSObject> CodeStubAssembler::AllocateJSIteratorResult(
SloppyTNode<Context> context, SloppyTNode<Object> value,
SloppyTNode<Oddball> done) {
@@ -12508,10 +12529,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
}
CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
- TNode<IntPtrT> argc, TNode<RawPtrT> fp,
- ReceiverMode receiver_mode)
+ TNode<IntPtrT> argc, TNode<RawPtrT> fp)
: assembler_(assembler),
- receiver_mode_(receiver_mode),
argc_(argc),
base_(),
fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
@@ -12531,7 +12550,6 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
}
TNode<Object> CodeStubArguments::GetReceiver() const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12541,7 +12559,6 @@ TNode<Object> CodeStubArguments::GetReceiver() const {
}
void CodeStubArguments::SetReceiver(TNode<Object> object) const {
- DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver);
#ifdef V8_REVERSE_JSARGS
intptr_t offset = -kSystemPointerSize;
#else
@@ -12575,26 +12592,6 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
}
TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
- int index, TNode<Object> default_value) {
- CodeStubAssembler::TVariable<Object> result(assembler_);
- CodeStubAssembler::Label argument_missing(assembler_),
- argument_done(assembler_, &result);
-
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(
- assembler_->IntPtrConstant(index), argc_),
- &argument_missing);
- result = AtIndex(index);
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_missing);
- result = default_value;
- assembler_->Goto(&argument_done);
-
- assembler_->BIND(&argument_done);
- return result.value();
-}
-
-TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
TNode<IntPtrT> index, TNode<Object> default_value) {
CodeStubAssembler::TVariable<Object> result(assembler_);
CodeStubAssembler::Label argument_missing(assembler_),
@@ -12641,13 +12638,8 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count;
- if (receiver_mode_ == ReceiverMode::kHasReceiver) {
- pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
- } else {
- pop_count = argc_;
- }
-
+ TNode<IntPtrT> pop_count =
+ assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
assembler_->PopAndReturn(pop_count, value);
}
@@ -13085,17 +13077,9 @@ TNode<Object> CodeStubAssembler::CallApiCallback(
TNode<Object> context, TNode<RawPtrT> callback, TNode<IntPtrT> argc,
TNode<Object> data, TNode<Object> holder, TNode<Object> receiver,
TNode<Object> value) {
- // CallApiCallback receives the first four arguments in registers
- // (callback, argc, data and holder). The last arguments are in the stack in
- // JS ordering. See ApiCallbackDescriptor.
Callable callable = CodeFactory::CallApiCallback(isolate());
-#ifdef V8_REVERSE_JSARGS
- return CallStub(callable, context, callback, argc, data, holder, value,
- receiver);
-#else
return CallStub(callable, context, callback, argc, data, holder, receiver,
value);
-#endif
}
TNode<Object> CodeStubAssembler::CallRuntimeNewArray(
diff --git a/chromium/v8/src/codegen/code-stub-assembler.h b/chromium/v8/src/codegen/code-stub-assembler.h
index b01729c73db..a1369993994 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.h
+++ b/chromium/v8/src/codegen/code-stub-assembler.h
@@ -107,6 +107,10 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(TypedArraySpeciesProtector, typed_array_species_protector, \
TypedArraySpeciesProtector)
+#define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \
+ V, rootIndexName, rootAccessorName, class_name) \
+ V(rootIndexName, rootAccessorName, class_name##Map)
+
#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \
V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
@@ -137,6 +141,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
EmptySlowElementDictionary) \
V(empty_string, empty_string, EmptyString) \
V(error_to_string, error_to_string, ErrorToString) \
+ V(errors_string, errors_string, ErrorsString) \
V(FalseValue, false_value, False) \
V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
@@ -168,7 +173,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
V(null_to_string, null_to_string, NullToString) \
V(NullValue, null_value, Null) \
- V(number_string, number_string, numberString) \
+ V(number_string, number_string, NumberString) \
V(number_to_string, number_to_string, NumberToString) \
V(Object_string, Object_string, ObjectString) \
V(object_to_string, object_to_string, ObjectToString) \
@@ -195,8 +200,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(resolve_string, resolve_string, ResolveString) \
V(return_string, return_string, ReturnString) \
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \
- V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \
- SloppyArgumentsElementsMap) \
V(SmallOrderedHashSetMap, small_ordered_hash_set_map, \
SmallOrderedHashSetMap) \
V(SmallOrderedHashMapMap, small_ordered_hash_map_map, \
@@ -212,6 +215,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(SymbolMap, symbol_map, SymbolMap) \
V(TheHoleValue, the_hole_value, TheHole) \
V(then_string, then_string, ThenString) \
+ V(toString_string, toString_string, ToStringString) \
+ V(to_primitive_symbol, to_primitive_symbol, ToPrimitiveSymbol) \
V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \
V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
V(TrueValue, true_value, True) \
@@ -228,9 +233,11 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(undefined_to_string, undefined_to_string, UndefinedToString) \
V(UndefinedValue, undefined_value, Undefined) \
V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \
+ V(valueOf_string, valueOf_string, ValueOfString) \
V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \
V(zero_string, zero_string, ZeroString) \
- TORQUE_INTERNAL_MAP_CSA_LIST(V)
+ UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR( \
+ UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER, V)
#define HEAP_IMMOVABLE_OBJECT_LIST(V) \
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \
@@ -367,15 +374,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
#endif
}
- MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
- return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
- : MachineRepresentation::kTaggedSigned;
- }
-
- MachineRepresentation OptimalParameterRepresentation() const {
- return ParameterRepresentation(OptimalParameterMode());
- }
-
TNode<IntPtrT> ParameterToIntPtr(TNode<Smi> value) { return SmiUntag(value); }
TNode<IntPtrT> ParameterToIntPtr(TNode<IntPtrT> value) { return value; }
// TODO(v8:9708): remove once all uses are ported.
@@ -384,24 +382,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<IntPtrT>(value);
}
- template <typename TIndex>
- TNode<TIndex> IntPtrToParameter(TNode<IntPtrT> value);
+ TNode<Smi> ParameterToTagged(TNode<Smi> value) { return value; }
- Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) return SmiTag(value);
- return value;
- }
-
- Node* Int32ToParameter(SloppyTNode<Int32T> value, ParameterMode mode) {
- return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode);
- }
+ TNode<Smi> ParameterToTagged(TNode<IntPtrT> value) { return SmiTag(value); }
TNode<Smi> ParameterToTagged(Node* value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) return SmiTag(value);
return UncheckedCast<Smi>(value);
}
- Node* TaggedToParameter(SloppyTNode<Smi> value, ParameterMode mode) {
+ template <typename TIndex>
+ TNode<TIndex> TaggedToParameter(TNode<Smi> value);
+
+ // TODO(v8:9708): remove once all uses are ported.
+ Node* TaggedToParameter(TNode<Smi> value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) return SmiUntag(value);
return value;
}
@@ -481,19 +475,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<HeapObject>(value);
}
- TNode<JSAggregateError> HeapObjectToJSAggregateError(
- TNode<HeapObject> heap_object, Label* fail);
-
- TNode<JSArray> HeapObjectToJSArray(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArray(heap_object), fail);
- return UncheckedCast<JSArray>(heap_object);
- }
-
- TNode<JSArrayBuffer> HeapObjectToJSArrayBuffer(TNode<HeapObject> heap_object,
- Label* fail) {
- GotoIfNot(IsJSArrayBuffer(heap_object), fail);
- return UncheckedCast<JSArrayBuffer>(heap_object);
+ TNode<Uint16T> Uint16Constant(uint16_t t) {
+ return UncheckedCast<Uint16T>(Int32Constant(t));
}
TNode<JSArray> TaggedToFastJSArray(TNode<Context> context,
@@ -659,11 +642,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// TODO(v8:9708): remove once all uses are ported.
Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- bool IsIntPtrOrSmiConstantZero(TNode<Smi> test);
- bool IsIntPtrOrSmiConstantZero(TNode<IntPtrT> test);
- // TODO(v8:9708): remove once all uses are ported.
- bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode);
-
bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value,
ParameterMode mode);
@@ -777,15 +755,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
}
- Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
- if (mode == SMI_PARAMETERS) {
- return SmiShl(CAST(a), shift);
- } else {
- DCHECK_EQ(INTPTR_PARAMETERS, mode);
- return WordShl(a, shift);
- }
- }
-
Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiShr(CAST(a), shift);
@@ -1437,11 +1406,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, index, additional_offset,
- parameter_mode, needs_poisoning,
+ INTPTR_PARAMETERS, needs_poisoning,
CheckBounds::kDebugOnly);
}
@@ -1452,14 +1420,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
needs_poisoning, check_bounds);
}
- // This doesn't emit a bounds-check. As part of the security-performance
- // tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
- TNode<IntPtrT> index,
- LoadSensitivity needs_poisoning) {
- return LoadFixedArrayElement(object, index, needs_poisoning,
- CheckBounds::kDebugOnly);
- }
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
@@ -1514,18 +1474,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load an array element from a WeakFixedArray.
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-
- TNode<MaybeObject> LoadWeakFixedArrayElement(
- TNode<WeakFixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return LoadWeakFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, INTPTR_PARAMETERS,
- needs_poisoning);
- }
+ TNode<MaybeObject> LoadWeakFixedArrayElement(TNode<WeakFixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0);
// Load an array element from a FixedDoubleArray.
TNode<Float64T> LoadFixedDoubleArrayElement(
@@ -1845,9 +1796,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Int32T> EnsureArrayPushable(TNode<Context> context, TNode<Map> map,
Label* bailout);
- void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
- Label* bailout, TNode<FixedArrayBase> elements,
- Node* index, TNode<Object> value);
+ void TryStoreArrayElement(ElementsKind kind, Label* bailout,
+ TNode<FixedArrayBase> elements, TNode<BInt> index,
+ TNode<Object> value);
// Consumes args into the array, and returns tagged new length.
TNode<Smi> BuildAppendJSArray(ElementsKind kind, TNode<JSArray> array,
CodeStubArguments* args,
@@ -1961,45 +1912,43 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
//
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
- // The ParameterMode argument is only used for the capacity parameter.
std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> capacity,
- AllocationFlags allocation_flags = kNone,
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> capacity, AllocationFlags allocation_flags = kNone,
int array_header_size = JSArray::kHeaderSize);
// Allocate a JSArray and fill elements with the hole.
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone);
- TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<Smi> capacity, TNode<Smi> length,
- TNode<AllocationSite> allocation_site,
- AllocationFlags allocation_flags = kNone) {
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<IntPtrT> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone);
+ TNode<JSArray> AllocateJSArray(
+ ElementsKind kind, TNode<Map> array_map, TNode<Smi> capacity,
+ TNode<Smi> length, base::Optional<TNode<AllocationSite>> allocation_site,
+ AllocationFlags allocation_flags = kNone) {
return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
allocation_site, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<Smi> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, {},
- allocation_flags);
+ return AllocateJSArray(kind, array_map, SmiUntag(capacity), length,
+ base::nullopt, allocation_flags);
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
TNode<IntPtrT> capacity, TNode<Smi> length,
AllocationFlags allocation_flags = kNone) {
- return AllocateJSArray(kind, array_map, capacity, length, {},
+ return AllocateJSArray(kind, array_map, capacity, length, base::nullopt,
allocation_flags);
}
// Allocate a JSArray and initialize the header fields.
- TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
- TNode<FixedArrayBase> elements,
- TNode<Smi> length,
- TNode<AllocationSite> allocation_site = {},
- int array_header_size = JSArray::kHeaderSize);
+ TNode<JSArray> AllocateJSArray(
+ TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
+ int array_header_size = JSArray::kHeaderSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
@@ -2014,15 +1963,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// function generates significantly less code in this case.
TNode<JSArray> CloneFastJSArray(
TNode<Context> context, TNode<JSArray> array,
- TNode<AllocationSite> allocation_site = {},
+ base::Optional<TNode<AllocationSite>> allocation_site = base::nullopt,
HoleConversionMode convert_holes = HoleConversionMode::kDontConvert);
TNode<JSArray> ExtractFastJSArray(TNode<Context> context,
- TNode<JSArray> array, Node* begin,
- Node* count,
- ParameterMode mode = INTPTR_PARAMETERS,
- Node* capacity = nullptr,
- TNode<AllocationSite> allocation_site = {});
+ TNode<JSArray> array, TNode<BInt> begin,
+ TNode<BInt> count);
TNode<FixedArrayBase> AllocateFixedArray(
ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
@@ -2093,11 +2039,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* capacity, ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
- // Perform CreateArrayIterator (ES #sec-createarrayiterator).
- TNode<JSArrayIterator> CreateArrayIterator(TNode<Context> context,
- TNode<Object> object,
- IterationKind mode);
-
// TODO(v8:9722): Return type should be JSIteratorResult
TNode<JSObject> AllocateJSIteratorResult(SloppyTNode<Context> context,
SloppyTNode<Object> value,
@@ -2234,17 +2175,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return UncheckedCast<FixedDoubleArray>(base);
}
- TNode<SloppyArgumentsElements> HeapObjectToSloppyArgumentsElements(
- TNode<HeapObject> base, Label* cast_fail) {
- GotoIf(TaggedNotEqual(LoadMap(base), SloppyArgumentsElementsMapConstant()),
- cast_fail);
- return UncheckedCast<SloppyArgumentsElements>(base);
- }
-
TNode<Int32T> ConvertElementsKindToInt(TNode<Int32T> elements_kind) {
return UncheckedCast<Int32T>(elements_kind);
}
+ template <typename T>
+ bool ClassHasMapConstant() {
+ return false;
+ }
+
+ template <typename T>
+ TNode<Map> GetClassMapConstant() {
+ UNREACHABLE();
+ return TNode<Map>();
+ }
+
enum class ExtractFixedArrayFlag {
kFixedArrays = 1,
kFixedDoubleArrays = 2,
@@ -2295,20 +2240,32 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<FixedArrayBase> ExtractFixedArray(
TNode<FixedArrayBase> source, TNode<Smi> first, TNode<Smi> count,
- TNode<Smi> capacity,
+ base::Optional<TNode<Smi>> capacity,
ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return ExtractFixedArray(source, first, count, capacity, extract_flags,
- SMI_PARAMETERS);
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt) {
+ // TODO(solanes): just use capacity when ExtractFixedArray is fully
+ // converted.
+ Node* capacity_node = capacity ? static_cast<Node*>(*capacity) : nullptr;
+ return ExtractFixedArray(source, first, count, capacity_node, extract_flags,
+ SMI_PARAMETERS, var_holes_converted,
+ source_elements_kind);
}
- TNode<FixedArray> ExtractFixedArray(
- TNode<FixedArray> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
- TNode<IntPtrT> capacity,
+ TNode<FixedArrayBase> ExtractFixedArray(
+ TNode<FixedArrayBase> source, TNode<IntPtrT> first, TNode<IntPtrT> count,
+ base::Optional<TNode<IntPtrT>> capacity,
ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays) {
- return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags,
- INTPTR_PARAMETERS));
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ TVariable<BoolT>* var_holes_converted = nullptr,
+ base::Optional<TNode<Int32T>> source_elements_kind = base::nullopt) {
+ // TODO(solanes): just use capacity when ExtractFixedArray is fully
+ // converted.
+ Node* capacity_node = capacity ? static_cast<Node*>(*capacity) : nullptr;
+ return ExtractFixedArray(source, first, count, capacity_node, extract_flags,
+ INTPTR_PARAMETERS, var_holes_converted,
+ source_elements_kind);
}
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
@@ -2400,12 +2357,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// (NOTE: not index!), does a hole check if |if_hole| is provided and
  // converts the value so that it becomes ready for storing to an array of
// |to_kind| elements.
- Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
+ Node* LoadElementAndPrepareForStore(TNode<FixedArrayBase> array,
+ TNode<IntPtrT> offset,
ElementsKind from_kind,
ElementsKind to_kind, Label* if_hole);
- Node* CalculateNewElementsCapacity(Node* old_capacity,
- ParameterMode mode = INTPTR_PARAMETERS);
+ Node* CalculateNewElementsCapacity(Node* old_capacity, ParameterMode mode);
TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
@@ -2425,11 +2382,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
+ template <typename TIndex>
TNode<FixedArrayBase> TryGrowElementsCapacity(TNode<HeapObject> object,
TNode<FixedArrayBase> elements,
- ElementsKind kind, Node* key,
- Node* capacity,
- ParameterMode mode,
+ ElementsKind kind,
+ TNode<TIndex> key,
+ TNode<TIndex> capacity,
Label* bailout);
// Grows elements capacity of given object. Returns new elements.
@@ -2441,10 +2399,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Given a need to grow by |growth|, allocate an appropriate new capacity
// if necessary, and return a new elements FixedArray object. Label |bailout|
// is followed for allocation failure.
- void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
- TNode<HeapObject> array, Node* length,
+ void PossiblyGrowElementsCapacity(ElementsKind kind, TNode<HeapObject> array,
+ TNode<BInt> length,
TVariable<FixedArrayBase>* var_elements,
- Node* growth, Label* bailout);
+ TNode<BInt> growth, Label* bailout);
// Allocation site manipulation
void InitializeAllocationMemento(TNode<HeapObject> base,
@@ -2568,7 +2526,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
TNode<BoolT> IsAccessorInfo(SloppyTNode<HeapObject> object);
TNode<BoolT> IsAccessorPair(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsAllocationSite(SloppyTNode<HeapObject> object);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2608,7 +2565,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsOddball(SloppyTNode<HeapObject> object);
TNode<BoolT> IsOddballInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsIndirectStringInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsJSAggregateError(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayBuffer(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSDataView(TNode<HeapObject> object);
TNode<BoolT> IsJSArrayInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2617,7 +2573,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSArrayIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSAsyncGeneratorObject(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSFunctionInstanceType(SloppyTNode<Int32T> instance_type);
- TNode<BoolT> IsAllocationSiteInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSFunctionMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSFunction(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSBoundFunction(SloppyTNode<HeapObject> object);
@@ -2685,9 +2640,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsCustomElementsReceiverInstanceType(
TNode<Int32T> instance_type);
TNode<BoolT> IsSpecialReceiverMap(SloppyTNode<Map> map);
- // Returns true if the map corresponds to non-special fast or dictionary
- // object.
- TNode<BoolT> IsSimpleObjectMap(TNode<Map> map);
TNode<BoolT> IsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsString(SloppyTNode<HeapObject> object);
TNode<BoolT> IsSymbolInstanceType(SloppyTNode<Int32T> instance_type);
@@ -2844,6 +2796,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
+ TNode<Object> OrdinaryToPrimitive(TNode<Context> context, TNode<Object> input,
+ OrdinaryToPrimitiveHint hint);
+
// Returns a node that contains a decoded (unsigned!) value of a bit
  // field |BitField| in |word32|. Returns the result as a uint32 node.
template <typename BitField>
@@ -3512,24 +3467,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class ForEachDirection { kForward, kReverse };
- using FastFixedArrayForEachBody =
- std::function<void(Node* fixed_array, Node* offset)>;
+ using FastArrayForEachBody =
+ std::function<void(TNode<HeapObject> array, TNode<IntPtrT> offset)>;
- void BuildFastFixedArrayForEach(
- const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+ void BuildFastArrayForEach(
+ const CodeStubAssembler::VariableList& vars, Node* array,
ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
- void BuildFastFixedArrayForEach(
- Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
- Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+ void BuildFastArrayForEach(
+ Node* array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse) {
CodeStubAssembler::VariableList list(0, zone());
- BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
- last_element_exclusive, body, mode, direction);
+ BuildFastArrayForEach(list, array, kind, first_element_inclusive,
+ last_element_exclusive, body, mode, direction);
}
TNode<IntPtrT> GetArrayAllocationSize(TNode<IntPtrT> element_count,
@@ -3764,6 +3719,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
bool ConstexprInt32NotEqual(int32_t a, int32_t b) { return a != b; }
bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; }
uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; }
+ int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; }
int31_t ConstexprInt31Add(int31_t a, int31_t b) {
int32_t val;
CHECK(!base::bits::SignedAddOverflow32(a, b, &val));
@@ -3943,7 +3899,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// fields initialized.
TNode<JSArray> AllocateUninitializedJSArray(
TNode<Map> array_map, TNode<Smi> length,
- TNode<AllocationSite> allocation_site, TNode<IntPtrT> size_in_bytes);
+ base::Optional<TNode<AllocationSite>> allocation_site,
+ TNode<IntPtrT> size_in_bytes);
TNode<BoolT> IsValidSmi(TNode<Smi> smi);
@@ -4017,49 +3974,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ParameterMode parameter_mode = INTPTR_PARAMETERS);
};
-// template <typename TIndex>
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
using Node = compiler::Node;
- enum ReceiverMode { kHasReceiver, kNoReceiver };
-
- // |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
- CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, argc, TNode<RawPtrT>(), receiver_mode) {}
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
-
- // TODO(v8:9708): Consider removing this variant
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc),
- TNode<RawPtrT>(), receiver_mode) {}
// |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver. The arguments will include a receiver iff |receiver_mode|
- // is kHasReceiver.
+  // the receiver. The arguments on the stack do include the receiver.
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc)
+ : CodeStubArguments(assembler, argc, TNode<RawPtrT>()) {}
+ CodeStubArguments(CodeStubAssembler* assembler, TNode<Int32T> argc)
+ : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc)) {}
CodeStubArguments(CodeStubAssembler* assembler, TNode<IntPtrT> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver);
-
- CodeStubArguments(CodeStubAssembler* assembler, TNode<Smi> argc,
- TNode<RawPtrT> fp,
- ReceiverMode receiver_mode = ReceiverMode::kHasReceiver)
- : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp,
- receiver_mode) {}
+ TNode<RawPtrT> fp);
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- receiver_mode_(ReceiverMode::kHasReceiver),
argc_(torque_arguments.length),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
@@ -4072,68 +4004,41 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
// Computes address of the index'th argument.
TNode<RawPtrT> AtIndexPtr(TNode<IntPtrT> index) const;
- TNode<RawPtrT> AtIndexPtr(TNode<Smi> index) const {
- return AtIndexPtr(assembler_->ParameterToIntPtr(index));
- }
// |index| is zero-based and does not include the receiver
TNode<Object> AtIndex(TNode<IntPtrT> index) const;
- // TODO(v8:9708): Consider removing this variant
- TNode<Object> AtIndex(TNode<Smi> index) const {
- return AtIndex(assembler_->ParameterToIntPtr(index));
- }
-
TNode<Object> AtIndex(int index) const;
- TNode<Object> GetOptionalArgumentValue(int index) {
- return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
- }
- TNode<Object> GetOptionalArgumentValue(int index,
- TNode<Object> default_value);
-
TNode<IntPtrT> GetLength() const { return argc_; }
TorqueStructArguments GetTorqueArguments() const {
return TorqueStructArguments{fp_, base_, argc_};
}
+ TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
+ TNode<Object> default_value);
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
- TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
- TNode<Object> default_value);
-
- using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ TNode<Object> GetOptionalArgumentValue(int index) {
+ return GetOptionalArgumentValue(assembler_->IntPtrConstant(index));
+ }
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- template <typename TIndex>
- void ForEach(const ForEachBodyFunction& body, TNode<TIndex> first = {},
- TNode<TIndex> last = {}) const {
+ using ForEachBodyFunction = std::function<void(TNode<Object> arg)>;
+ void ForEach(const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
+ TNode<IntPtrT> last = {}) const {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last);
}
-
- // Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
const ForEachBodyFunction& body, TNode<IntPtrT> first = {},
TNode<IntPtrT> last = {}) const;
- void ForEach(const CodeStubAssembler::VariableList& vars,
- const ForEachBodyFunction& body, TNode<Smi> first,
- TNode<Smi> last = {}) const {
- TNode<IntPtrT> first_intptr = assembler_->ParameterToIntPtr(first);
- TNode<IntPtrT> last_intptr;
- if (last != nullptr) {
- last_intptr = assembler_->ParameterToIntPtr(last);
- }
- return ForEach(vars, body, first_intptr, last_intptr);
- }
-
void PopAndReturn(TNode<Object> value);
private:
CodeStubAssembler* assembler_;
- ReceiverMode receiver_mode_;
TNode<IntPtrT> argc_;
TNode<RawPtrT> base_;
TNode<RawPtrT> fp_;
@@ -4226,6 +4131,19 @@ class PrototypeCheckAssembler : public CodeStubAssembler {
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
+#define CLASS_MAP_CONSTANT_ADAPTER(V, rootIndexName, rootAccessorName, \
+ class_name) \
+ template <> \
+ inline bool CodeStubAssembler::ClassHasMapConstant<class_name>() { \
+ return true; \
+ } \
+ template <> \
+ inline TNode<Map> CodeStubAssembler::GetClassMapConstant<class_name>() { \
+ return class_name##MapConstant(); \
+ }
+
+UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _)
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
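// The CLASS_MAP_CONSTANT_ADAPTER block above is an X-macro that stamps out
// template specializations. A stand-alone illustration of the same pattern
// (the class names and list macro here are hypothetical):
//
//   #include <iostream>
//
//   template <typename T>
//   bool HasMapConstantModel() { return false; }  // primary: no map constant
//
//   struct SymbolModel {};
//   struct OddballModel {};
//
//   #define MODEL_CLASS_LIST(V) V(SymbolModel) V(OddballModel)
//   #define MODEL_ADAPTER(Class) \
//     template <>                \
//     bool HasMapConstantModel<Class>() { return true; }
//   MODEL_CLASS_LIST(MODEL_ADAPTER)
//   #undef MODEL_ADAPTER
//
//   int main() {
//     // Prints 1 then 0: listed classes get a specialization, others fall
//     // back to the primary template.
//     std::cout << HasMapConstantModel<SymbolModel>()
//               << HasMapConstantModel<int>() << "\n";
//   }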
diff --git a/chromium/v8/src/codegen/compiler.cc b/chromium/v8/src/codegen/compiler.cc
index c436c57407c..1386c1dd199 100644
--- a/chromium/v8/src/codegen/compiler.cc
+++ b/chromium/v8/src/codegen/compiler.cc
@@ -751,7 +751,7 @@ void InsertCodeIntoOptimizedCodeCache(
// Function context specialization folds-in the function context,
// so no sharing can occur.
- if (compilation_info->is_function_context_specializing()) {
+ if (compilation_info->function_context_specializing()) {
// Native context specialized code is not shared, so make sure the optimized
// code cache is clear.
ClearOptimizedCodeCache(compilation_info);
@@ -1090,7 +1090,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
- isolate)) {
+ isolate, parsing::ReportStatisticsMode::kYes)) {
+ FailWithPendingException(isolate, script, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
return MaybeHandle<SharedFunctionInfo>();
}
// Measure how long it takes to do the compilation; only take the
@@ -1456,7 +1458,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// Parse and update ParseInfo with the results. Don't update parsing
// statistics since we've already parsed the code before.
if (!parsing::ParseAny(&parse_info, shared_info, isolate,
- parsing::ReportErrorsAndStatisticsMode::kNo)) {
+ parsing::ReportStatisticsMode::kNo)) {
// Parsing failed probably as a result of stack exhaustion.
bytecode->SetSourcePositionsFailedToCollect();
return FailAndClearPendingException(isolate);
@@ -1548,7 +1550,8 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Parse and update ParseInfo with the results.
- if (!parsing::ParseAny(&parse_info, shared_info, isolate)) {
+ if (!parsing::ParseAny(&parse_info, shared_info, isolate,
+ parsing::ReportStatisticsMode::kYes)) {
return FailWithPendingException(isolate, script, &parse_info, flag);
}
@@ -1595,7 +1598,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Initialize the feedback cell for this JSFunction.
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, is_compiled_scope);
// Optimize now if --always-opt is enabled.
if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
@@ -1801,7 +1804,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Make sure to cache this result.
Handle<FeedbackCell> new_feedback_cell(result->raw_feedback_cell(),
@@ -1813,7 +1816,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
} else {
result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared_info, context, AllocationType::kYoung);
- JSFunction::InitializeFeedbackCell(result);
+ JSFunction::InitializeFeedbackCell(result, &is_compiled_scope);
if (allow_eval_cache) {
// Add the SharedFunctionInfo and the LiteralsArray to the eval cache if
// we didn't retrieve from there.
@@ -2764,7 +2767,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// If code is compiled to bytecode (i.e., isn't asm.js), then allocate a
// feedback and check for optimized code.
if (is_compiled_scope.is_compiled() && shared->HasBytecodeArray()) {
- JSFunction::InitializeFeedbackCell(function);
+ JSFunction::InitializeFeedbackCell(function, &is_compiled_scope);
Code code = function->has_feedback_vector()
? function->feedback_vector().optimized_code()
@@ -2779,7 +2782,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
if (FLAG_always_opt && shared->allows_lazy_compilation() &&
!shared->optimization_disabled() && !function->IsOptimized() &&
!function->HasOptimizedCode()) {
- JSFunction::EnsureFeedbackVector(function);
+ JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
}
}
diff --git a/chromium/v8/src/codegen/cpu-features.h b/chromium/v8/src/codegen/cpu-features.h
index 14c94ebae9a..eef98f77e78 100644
--- a/chromium/v8/src/codegen/cpu-features.h
+++ b/chromium/v8/src/codegen/cpu-features.h
@@ -27,7 +27,7 @@ enum CpuFeature {
POPCNT,
ATOM,
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+#elif V8_TARGET_ARCH_ARM
// - Standard configurations. The baseline is ARMv6+VFPv2.
ARMv7, // ARMv7-A + VFPv3-D32 + NEON
ARMv7_SUDIV, // ARMv7-A + VFPv4-D32 + NEON + SUDIV
@@ -39,6 +39,9 @@ enum CpuFeature {
VFP32DREGS = ARMv7,
SUDIV = ARMv7_SUDIV,
+#elif V8_TARGET_ARCH_ARM64
+ JSCVT,
+
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
FPU,
FP64FPU,
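// JSCVT gates ARMv8.3's FJCVTZS instruction, which performs the ECMAScript
// double-to-int32 conversion (truncate toward zero, wrap modulo 2^32) in a
// single instruction. A portable model of that semantics:
//
//   #include <cmath>
//   #include <cstdint>
//
//   int32_t JsToInt32Model(double x) {
//     if (!std::isfinite(x)) return 0;  // NaN and +/-Infinity map to 0
//     double t = std::trunc(x);
//     double m = std::fmod(t, 4294967296.0);  // wrap modulo 2^32
//     if (m < 0) m += 4294967296.0;
//     return static_cast<int32_t>(static_cast<uint32_t>(m));
//   }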
diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc
index 5c2c63e816c..4595269d028 100644
--- a/chromium/v8/src/codegen/external-reference.cc
+++ b/chromium/v8/src/codegen/external-reference.cc
@@ -277,6 +277,14 @@ FUNCTION_REFERENCE(wasm_float32_to_int64, wasm::float32_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float32_to_uint64, wasm::float32_to_uint64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_int64, wasm::float64_to_int64_wrapper)
FUNCTION_REFERENCE(wasm_float64_to_uint64, wasm::float64_to_uint64_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_int64_sat,
+ wasm::float32_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float32_to_uint64_sat,
+ wasm::float32_to_uint64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_int64_sat,
+ wasm::float64_to_int64_sat_wrapper)
+FUNCTION_REFERENCE(wasm_float64_to_uint64_sat,
+ wasm::float64_to_uint64_sat_wrapper)
FUNCTION_REFERENCE(wasm_int64_div, wasm::int64_div_wrapper)
FUNCTION_REFERENCE(wasm_int64_mod, wasm::int64_mod_wrapper)
FUNCTION_REFERENCE(wasm_uint64_div, wasm::uint64_div_wrapper)
@@ -289,6 +297,9 @@ FUNCTION_REFERENCE(wasm_word32_rol, wasm::word32_rol_wrapper)
FUNCTION_REFERENCE(wasm_word32_ror, wasm::word32_ror_wrapper)
FUNCTION_REFERENCE(wasm_word64_rol, wasm::word64_rol_wrapper)
FUNCTION_REFERENCE(wasm_word64_ror, wasm::word64_ror_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_ceil, wasm::f32x4_ceil_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_floor, wasm::f32x4_floor_wrapper)
+FUNCTION_REFERENCE(wasm_f32x4_trunc, wasm::f32x4_trunc_wrapper)
FUNCTION_REFERENCE(wasm_memory_init, wasm::memory_init_wrapper)
FUNCTION_REFERENCE(wasm_memory_copy, wasm::memory_copy_wrapper)
FUNCTION_REFERENCE(wasm_memory_fill, wasm::memory_fill_wrapper)
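// The *_sat wrappers registered above implement Wasm's saturating
// truncations, which never trap: NaN converts to 0 and out-of-range values
// clamp to the integer limits. A model of the spec'd semantics (not a quote
// of V8's wrapper code):
//
//   #include <cmath>
//   #include <cstdint>
//   #include <limits>
//
//   int64_t Float64ToInt64SatModel(double x) {
//     if (std::isnan(x)) return 0;
//     if (x >= 9223372036854775808.0) {  // 2^63, exactly representable
//       return std::numeric_limits<int64_t>::max();
//     }
//     if (x < -9223372036854775808.0) {  // below -2^63
//       return std::numeric_limits<int64_t>::min();
//     }
//     return static_cast<int64_t>(x);  // in-range truncation toward zero
//   }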
@@ -488,8 +499,12 @@ FUNCTION_REFERENCE_WITH_ISOLATE(re_match_for_call_from_js,
IrregexpInterpreter::MatchForCallFromJs)
FUNCTION_REFERENCE_WITH_ISOLATE(
- re_case_insensitive_compare_uc16,
- NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)
+ re_case_insensitive_compare_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode)
+
+FUNCTION_REFERENCE_WITH_ISOLATE(
+ re_case_insensitive_compare_non_unicode,
+ NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode)
ExternalReference ExternalReference::re_word_character_map(Isolate* isolate) {
return ExternalReference(
diff --git a/chromium/v8/src/codegen/external-reference.h b/chromium/v8/src/codegen/external-reference.h
index f42a7d74861..f5e93210d66 100644
--- a/chromium/v8/src/codegen/external-reference.h
+++ b/chromium/v8/src/codegen/external-reference.h
@@ -77,8 +77,10 @@ class StatsCounter;
V(address_of_regexp_stack_memory_top_address, \
"RegExpStack::memory_top_address_address()") \
V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
- V(re_case_insensitive_compare_uc16, \
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
+ V(re_case_insensitive_compare_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUnicode()") \
+ V(re_case_insensitive_compare_non_unicode, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareNonUnicode()") \
V(re_check_stack_guard_state, \
"RegExpMacroAssembler*::CheckStackGuardState()") \
V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
@@ -181,9 +183,13 @@ class StatsCounter;
V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
+ V(wasm_float32_to_int64_sat, "wasm::float32_to_int64_sat_wrapper") \
+ V(wasm_float32_to_uint64_sat, "wasm::float32_to_uint64_sat_wrapper") \
V(wasm_float64_pow, "wasm::float64_pow") \
V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
+ V(wasm_float64_to_int64_sat, "wasm::float64_to_int64_sat_wrapper") \
+ V(wasm_float64_to_uint64_sat, "wasm::float64_to_uint64_sat_wrapper") \
V(wasm_int64_div, "wasm::int64_div") \
V(wasm_int64_mod, "wasm::int64_mod") \
V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
@@ -200,6 +206,9 @@ class StatsCounter;
V(wasm_word64_ror, "wasm::word64_ror") \
V(wasm_word64_ctz, "wasm::word64_ctz") \
V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ V(wasm_f32x4_ceil, "wasm::f32x4_ceil_wrapper") \
+ V(wasm_f32x4_floor, "wasm::f32x4_floor_wrapper") \
+ V(wasm_f32x4_trunc, "wasm::f32x4_trunc_wrapper") \
V(wasm_memory_init, "wasm::memory_init") \
V(wasm_memory_copy, "wasm::memory_copy") \
V(wasm_memory_fill, "wasm::memory_fill") \
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.cc b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
index 551750936db..321a59ceded 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
@@ -691,6 +691,29 @@ void Assembler::stos() {
EMIT(0xAB);
}
+void Assembler::xadd(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_b(Operand dst, Register src) {
+ DCHECK(src.is_byte_register());
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC0);
+ emit_operand(src, dst);
+}
+
+void Assembler::xadd_w(Operand dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xC1);
+ emit_operand(src, dst);
+}
+
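Editor's note: xadd stores dst + src into dst and the old dst into src, which makes it the standard x86 building block for atomic fetch-and-add once a lock prefix is applied. A hedged sketch of how the new emitters might be used (EmitAtomicFetchAdd is illustrative, not part of this patch):

    // Illustrative fragment: emit an atomic 32-bit fetch-and-add.
    void EmitAtomicFetchAdd(Assembler* masm, Operand dst, Register src) {
      masm->lock();          // Make the read-modify-write atomic.
      masm->xadd(dst, src);  // src receives the old value, dst the sum.
    }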
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
@@ -2246,6 +2269,30 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x08);
+ emit_sse_operand(dst, src);
+  // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x09);
+ emit_sse_operand(dst, src);
+  // Mask precision exception.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
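Editor's note: in the SSE4.1 ROUNDPS/ROUNDPD encoding the immediate carries more than the rounding mode, which is why every emission above ORs in 0x8. Roughly:

    // Immediate layout for roundps/roundpd (per the Intel SDM):
    //   bits 1:0  rounding mode (nearest, down, up, truncate)
    //   bit  2    1 = take the mode from MXCSR.RC instead of bits 1:0
    //   bit  3    1 = suppress precision (inexact) exceptions
    constexpr byte kRoundImmSuppressPrecision = 0x8;  // illustrative name
    byte imm8 = static_cast<byte>(mode) | kRoundImmSuppressPrecision;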
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2921,6 +2968,15 @@ void Assembler::vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
EMIT(offset);
}
+void Assembler::vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+void Assembler::vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, Operand(src), k66, k0F3A, kWIG);
+ EMIT(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+}
+
void Assembler::vmovmskps(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.h b/chromium/v8/src/codegen/ia32/assembler-ia32.h
index 60d978df5be..5edbe8677a1 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.h
@@ -528,6 +528,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rep_stos();
void stos();
+ void xadd(Operand dst, Register src);
+ void xadd_b(Operand dst, Register src);
+ void xadd_w(Operand dst, Register src);
+
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, Operand src);
@@ -1064,6 +1068,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void pinsrd(XMMRegister dst, Operand src, uint8_t offset);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132sd(dst, src1, Operand(src2));
@@ -1409,6 +1416,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t offset);
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
vcvtdq2ps(dst, Operand(src));
}
diff --git a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index 8b1ea8d880e..ee9c3919cd4 100644
--- a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -195,12 +195,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edx, eax};
@@ -312,6 +306,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
index b73050a680d..8b1cc912987 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -597,6 +597,28 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
+void TurboAssembler::Roundps(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundps(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundps(dst, src, mode);
+ }
+}
+
+void TurboAssembler::Roundpd(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundpd(dst, src, mode);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ roundpd(dst, src, mode);
+ }
+}
+
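Editor's note: Roundps/Roundpd follow the macro-assembler's usual capability dispatch: prefer the VEX-encoded form under AVX, otherwise fall back to the SSE4.1 encoding, with CpuFeatureScope asserting the chosen feature was probed. The same shape generalizes to any newly added packed op; a sketch (helper name and lambda wiring are illustrative):

    // Generic shape of the AVX-else-SSE4.1 dispatch used above.
    template <typename AvxEmit, typename SseEmit>
    void DispatchAvxOrSse4(TurboAssembler* tasm, AvxEmit avx, SseEmit sse) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(tasm, AVX);
        avx();  // e.g. [&] { tasm->vroundps(dst, src, mode); }
      } else {
        CpuFeatureScope scope(tasm, SSE4_1);
        sse();  // e.g. [&] { tasm->roundps(dst, src, mode); }
      }
    }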
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -2045,9 +2067,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
index 94ddb2f7847..2b1f4400146 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -286,6 +286,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
+ AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, const Operand&)
@@ -319,6 +321,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP3_XO(Pcmpeqb, pcmpeqb)
AVX_OP3_XO(Pcmpeqw, pcmpeqw)
AVX_OP3_XO(Pcmpeqd, pcmpeqd)
+ AVX_OP3_XO(Por, por)
AVX_OP3_XO(Psubb, psubb)
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
@@ -357,6 +360,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
+ AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
AVX_PACKED_OP3(Subpd, subpd)
@@ -365,6 +369,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
AVX_PACKED_OP3(Cmpltpd, cmpltpd)
+ AVX_PACKED_OP3(Cmpleps, cmpleps)
AVX_PACKED_OP3(Cmplepd, cmplepd)
AVX_PACKED_OP3(Minps, minps)
AVX_PACKED_OP3(Minpd, minpd)
@@ -380,6 +385,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3(Psrlq, psrlq)
AVX_PACKED_OP3(Psraw, psraw)
AVX_PACKED_OP3(Psrad, psrad)
+ AVX_PACKED_OP3(Pmaddwd, pmaddwd)
+ AVX_PACKED_OP3(Paddd, paddd)
AVX_PACKED_OP3(Paddq, paddq)
AVX_PACKED_OP3(Psubq, psubq)
AVX_PACKED_OP3(Pmuludq, pmuludq)
@@ -444,6 +451,30 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef AVX_OP2_WITH_TYPE_SCOPE
#undef AVX_OP2_XO_SSE4
+#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
+ sse_scope) \
+ void macro_name(dst_type dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ return; \
+ } \
+ if (CpuFeatures::IsSupported(sse_scope)) { \
+ CpuFeatureScope scope(this, sse_scope); \
+ name(dst, src); \
+ return; \
+ } \
+ UNREACHABLE(); \
+ }
+#define AVX_OP3_XO_SSE4(macro_name, name) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
+ AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
+
+ AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
+
+#undef AVX_OP3_XO_SSE4
+#undef AVX_OP3_WITH_TYPE_SCOPE
+
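Editor's note: the new AVX_OP3_WITH_TYPE_SCOPE macro maps the two-operand SSE form onto the three-operand VEX form by repeating dst as the first source, and falls through to UNREACHABLE() when neither feature is available. For reference, AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd) expands to approximately:

    // Approximate expansion for the XMMRegister/XMMRegister overload.
    void Pmaxsd(XMMRegister dst, XMMRegister src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);
        vpmaxsd(dst, dst, src);  // VEX form: dst = max(dst, src).
        return;
      }
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope(this, SSE4_1);
        pmaxsd(dst, src);  // Legacy SSE4.1 form.
        return;
      }
      UNREACHABLE();
    }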
void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, Operand(src)); }
void Pshufb(XMMRegister dst, Operand src);
void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
@@ -506,6 +537,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
+ void Roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
diff --git a/chromium/v8/src/codegen/ia32/sse-instr.h b/chromium/v8/src/codegen/ia32/sse-instr.h
index b8a7a3c827a..a56dc13361c 100644
--- a/chromium/v8/src/codegen/ia32/sse-instr.h
+++ b/chromium/v8/src/codegen/ia32/sse-instr.h
@@ -9,6 +9,7 @@
V(packsswb, 66, 0F, 63) \
V(packssdw, 66, 0F, 6B) \
V(packuswb, 66, 0F, 67) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \
diff --git a/chromium/v8/src/codegen/interface-descriptors.cc b/chromium/v8/src/codegen/interface-descriptors.cc
index 503da3cb43c..ce12bc61f53 100644
--- a/chromium/v8/src/codegen/interface-descriptors.cc
+++ b/chromium/v8/src/codegen/interface-descriptors.cc
@@ -30,10 +30,12 @@ void CallInterfaceDescriptorData::InitializePlatformSpecific(
void CallInterfaceDescriptorData::InitializePlatformIndependent(
Flags flags, int return_count, int parameter_count,
- const MachineType* machine_types, int machine_types_length) {
+ const MachineType* machine_types, int machine_types_length,
+ StackArgumentOrder stack_order) {
DCHECK(IsInitializedPlatformSpecific());
flags_ = flags;
+ stack_order_ = stack_order;
return_count_ = return_count;
param_count_ = parameter_count;
const int types_length = return_count_ + param_count_;
@@ -83,7 +85,6 @@ void CallDescriptors::InitializeOncePerProcess() {
DCHECK(ContextOnlyDescriptor{}.HasContextParameter());
DCHECK(!NoContextDescriptor{}.HasContextParameter());
DCHECK(!AllocateDescriptor{}.HasContextParameter());
- DCHECK(!AllocateHeapNumberDescriptor{}.HasContextParameter());
DCHECK(!AbortDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat32ToNumberDescriptor{}.HasContextParameter());
DCHECK(!WasmFloat64ToNumberDescriptor{}.HasContextParameter());
@@ -391,44 +392,17 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
}
#endif // !V8_TARGET_ARCH_IA32
-void WasmTableInitDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmTableCopyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
kParameterCount - kStackArgumentsCount);
}
-
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
#endif
void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
diff --git a/chromium/v8/src/codegen/interface-descriptors.h b/chromium/v8/src/codegen/interface-descriptors.h
index fc27b46ca14..14c021b3b76 100644
--- a/chromium/v8/src/codegen/interface-descriptors.h
+++ b/chromium/v8/src/codegen/interface-descriptors.h
@@ -20,93 +20,101 @@ namespace internal {
BUILTIN_LIST_FROM_TORQUE(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Abort) \
- V(Allocate) \
- V(AllocateHeapNumber) \
- V(ApiCallback) \
- V(ApiGetter) \
- V(ArgumentsAdaptor) \
- V(ArrayConstructor) \
- V(ArrayNArgumentsConstructor) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(AsyncFunctionStackParameter) \
- V(BigIntToI32Pair) \
- V(BigIntToI64) \
- V(BinaryOp) \
- V(BinaryOp_WithFeedback) \
- V(CallForwardVarargs) \
- V(CallFunctionTemplate) \
- V(CallTrampoline) \
- V(CallTrampoline_WithFeedback) \
- V(CallVarargs) \
- V(CallWithArrayLike) \
- V(CallWithSpread) \
- V(CEntry1ArgvOnStack) \
- V(CloneObjectWithVector) \
- V(Compare) \
- V(Compare_WithFeedback) \
- V(ConstructForwardVarargs) \
- V(ConstructStub) \
- V(ConstructVarargs) \
- V(ConstructWithArrayLike) \
- V(Construct_WithFeedback) \
- V(ConstructWithSpread) \
- V(ContextOnly) \
- V(CppBuiltinAdaptor) \
- V(EphemeronKeyBarrier) \
- V(FastNewFunctionContext) \
- V(FastNewObject) \
- V(FrameDropperTrampoline) \
- V(GetIteratorStackParameter) \
- V(GetProperty) \
- V(GrowArrayElements) \
- V(I32PairToBigInt) \
- V(I64ToBigInt) \
- V(InterpreterCEntry1) \
- V(InterpreterCEntry2) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsThenCall) \
- V(InterpreterPushArgsThenConstruct) \
- V(JSTrampoline) \
- V(Load) \
- V(LoadGlobal) \
- V(LoadGlobalNoFeedback) \
- V(LoadGlobalWithVector) \
- V(LoadNoFeedback) \
- V(LoadWithVector) \
- V(NewArgumentsElements) \
- V(NoContext) \
- V(RecordWrite) \
- V(ResumeGenerator) \
- V(RunMicrotasks) \
- V(RunMicrotasksEntry) \
- V(Store) \
- V(StoreGlobal) \
- V(StoreGlobalWithVector) \
- V(StoreTransition) \
- V(StoreWithVector) \
- V(StringAt) \
- V(StringAtAsString) \
- V(StringSubstring) \
- V(TypeConversion) \
- V(TypeConversionStackParameter) \
- V(Typeof) \
- V(UnaryOp_WithFeedback) \
- V(Void) \
- V(WasmAtomicNotify) \
- V(WasmFloat32ToNumber) \
- V(WasmFloat64ToNumber) \
- V(WasmI32AtomicWait32) \
- V(WasmI32AtomicWait64) \
- V(WasmI64AtomicWait32) \
- V(WasmI64AtomicWait64) \
- V(WasmTableInit) \
- V(WasmTableCopy) \
- BUILTIN_LIST_TFS(V) \
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Abort) \
+ V(Allocate) \
+ V(ApiCallback) \
+ V(ApiGetter) \
+ V(ArgumentsAdaptor) \
+ V(ArrayConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(AsyncFunctionStackParameter) \
+ V(BigIntToI32Pair) \
+ V(BigIntToI64) \
+ V(BinaryOp) \
+ V(BinaryOp_WithFeedback) \
+ V(CallForwardVarargs) \
+ V(CallFunctionTemplate) \
+ V(CallTrampoline) \
+ V(CallTrampoline_WithFeedback) \
+ V(CallVarargs) \
+ V(CallWithArrayLike) \
+ V(CallWithArrayLike_WithFeedback) \
+ V(CallWithSpread) \
+ V(CallWithSpread_WithFeedback) \
+ V(CEntry1ArgvOnStack) \
+ V(CloneObjectWithVector) \
+ V(Compare) \
+ V(Compare_WithFeedback) \
+ V(ConstructForwardVarargs) \
+ V(ConstructStub) \
+ V(ConstructVarargs) \
+ V(ConstructWithArrayLike) \
+ V(ConstructWithArrayLike_WithFeedback) \
+ V(Construct_WithFeedback) \
+ V(ConstructWithSpread) \
+ V(ConstructWithSpread_WithFeedback) \
+ V(ContextOnly) \
+ V(CppBuiltinAdaptor) \
+ V(EphemeronKeyBarrier) \
+ V(FastNewFunctionContext) \
+ V(FastNewObject) \
+ V(FrameDropperTrampoline) \
+ V(GetIteratorStackParameter) \
+ V(GetProperty) \
+ V(GrowArrayElements) \
+ V(I32PairToBigInt) \
+ V(I64ToBigInt) \
+ V(InterpreterCEntry1) \
+ V(InterpreterCEntry2) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsThenCall) \
+ V(InterpreterPushArgsThenConstruct) \
+ V(JSTrampoline) \
+ V(Load) \
+ V(LoadGlobal) \
+ V(LoadGlobalNoFeedback) \
+ V(LoadGlobalWithVector) \
+ V(LoadNoFeedback) \
+ V(LoadWithVector) \
+ V(NewArgumentsElements) \
+ V(NoContext) \
+ V(RecordWrite) \
+ V(ResumeGenerator) \
+ V(RunMicrotasks) \
+ V(RunMicrotasksEntry) \
+ V(Store) \
+ V(StoreGlobal) \
+ V(StoreGlobalWithVector) \
+ V(StoreTransition) \
+ V(StoreWithVector) \
+ V(StringAt) \
+ V(StringAtAsString) \
+ V(StringSubstring) \
+ V(TypeConversion) \
+ V(TypeConversionStackParameter) \
+ V(Typeof) \
+ V(UnaryOp_WithFeedback) \
+ V(Void) \
+ V(WasmFloat32ToNumber) \
+ V(WasmFloat64ToNumber) \
+ V(WasmI32AtomicWait32) \
+ V(WasmI64AtomicWait32) \
+ BUILTIN_LIST_TFS(V) \
TORQUE_BUILTIN_LIST_TFC(V)
+enum class StackArgumentOrder {
+  kDefault,  // Arguments on the stack are pushed in the default/stub order
+             // (the first argument is pushed first).
+  kJS,  // Arguments on the stack are pushed in the same order as is used by
+        // JS-to-JS function calls. This should be used if calling a
+        // JSFunction or if the builtin is expected to be called directly from
+        // a JSFunction. When V8_REVERSE_JSARGS is set, this order is reversed
+        // compared to kDefault.
+};
+
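Editor's note: a concrete reading of the two orders, as a sketch (TurboAssemblerLike and the helper are hypothetical, for illustration only):

    // Pushing arguments args[0..n) in each order.
    void PushArgs(TurboAssemblerLike* tasm, Register* args, int n,
                  StackArgumentOrder order, bool reverse_jsargs) {
      bool reversed =
          order == StackArgumentOrder::kJS && reverse_jsargs;
      if (reversed) {
        // Last argument pushed first (reverse of kDefault).
        for (int i = n - 1; i >= 0; --i) tasm->Push(args[i]);
      } else {
        // kDefault, and kJS without V8_REVERSE_JSARGS.
        for (int i = 0; i < n; ++i) tasm->Push(args[i]);
      }
    }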
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
enum Flag {
@@ -142,7 +150,8 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
void InitializePlatformIndependent(Flags flags, int return_count,
int parameter_count,
const MachineType* machine_types,
- int machine_types_length);
+ int machine_types_length,
+ StackArgumentOrder stack_order);
void Reset();
@@ -165,6 +174,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
DCHECK_LT(index, param_count_);
return machine_types_[return_count_ + index];
}
+ StackArgumentOrder stack_order() const { return stack_order_; }
void RestrictAllocatableRegisters(const Register* registers, int num) {
DCHECK_EQ(allocatable_registers_, 0);
@@ -199,6 +209,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
int return_count_ = -1;
int param_count_ = -1;
Flags flags_ = kNoFlags;
+ StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault;
// Specifying the set of registers that could be used by the register
// allocator. Currently, it's only used by RecordWrite code stub.
@@ -295,6 +306,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return data()->allocatable_registers();
}
+ StackArgumentOrder GetStackArgumentOrder() const {
+ return data()->stack_order();
+ }
+
static const Register ContextRegister();
const char* DebugName() const;
@@ -314,9 +329,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
CallInterfaceDescriptorData* data) {
// Default descriptor configuration: one result, all parameters are passed
// in registers and all parameters have MachineType::AnyTagged() type.
- data->InitializePlatformIndependent(CallInterfaceDescriptorData::kNoFlags,
- 1, data->register_param_count(),
- nullptr, 0);
+ data->InitializePlatformIndependent(
+ CallInterfaceDescriptorData::kNoFlags, 1, data->register_param_count(),
+ nullptr, 0, StackArgumentOrder::kDefault);
}
// Initializes |data| using the platform dependent default set of registers.
@@ -402,7 +417,8 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, nullptr, 0); \
+ kParameterCount, nullptr, 0, \
+ kStackArgumentOrder); \
} \
name(CallDescriptors::Key key) : base(key) {} \
\
@@ -420,9 +436,11 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
\
public:
-#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, return_count, ...) \
+#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, stack_order, \
+ return_count, ...) \
static constexpr int kDescriptorFlags = flags; \
static constexpr int kReturnCount = return_count; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = stack_order; \
enum ParameterIndices { \
__dummy = -1, /* to be able to pass zero arguments */ \
##__VA_ARGS__, \
@@ -431,35 +449,41 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kContext = kParameterCount /* implicit parameter */ \
};
-#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, return_count, ##__VA_ARGS__)
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, \
+ return_count, ##__VA_ARGS__)
// This is valid only for builtins that use EntryFrame, which does not scan
// stack arguments on GC.
-#define DEFINE_PARAMETERS_ENTRY(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kNoContext | \
- CallInterfaceDescriptorData::kNoStackScan; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- __dummy = -1, /* to be able to pass zero arguments */ \
- ##__VA_ARGS__, \
- \
- kParameterCount \
+#define DEFINE_PARAMETERS_ENTRY(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kNoContext | \
+ CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kDefault; \
+ static constexpr int kReturnCount = 1; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, 1, \
+ ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoContext, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kNoContext, StackArgumentOrder::kDefault, \
+ 1, ##__VA_ARGS__)
-#define DEFINE_PARAMETERS_VARARGS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_VARARGS(...) \
+ DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
+ CallInterfaceDescriptorData::kAllowVarArgs, StackArgumentOrder::kJS, 1, \
+ ##__VA_ARGS__)
#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
@@ -470,7 +494,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent( \
Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
- machine_types, arraysize(machine_types)); \
+ machine_types, arraysize(machine_types), kStackArgumentOrder); \
}
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
@@ -481,18 +505,20 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
-#define DEFINE_JS_PARAMETERS(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs; \
- static constexpr int kReturnCount = 1; \
- enum ParameterIndices { \
- kTarget, \
- kNewTarget, \
- kActualArgumentsCount, \
- ##__VA_ARGS__, \
- \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
+#define DEFINE_JS_PARAMETERS(...) \
+ static constexpr int kDescriptorFlags = \
+ CallInterfaceDescriptorData::kAllowVarArgs; \
+ static constexpr int kReturnCount = 1; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS; \
+ enum ParameterIndices { \
+ kTarget, \
+ kNewTarget, \
+ kActualArgumentsCount, \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
};
#define DEFINE_JS_PARAMETER_TYPES(...) \
@@ -554,7 +580,8 @@ class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
kParameterCount, machine_types.data(),
- static_cast<int>(machine_types.size()));
+ static_cast<int>(machine_types.size()),
+ StackArgumentOrder::kDefault);
}
};
@@ -948,6 +975,20 @@ class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsCount, kSpread, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::Int32(), // kArgumentsCount
+ MachineType::AnyTagged(), // kSpread
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
@@ -956,6 +997,19 @@ class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class CallWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
@@ -979,6 +1033,20 @@ class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithSpread_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+  // Note: kSlot comes before kSpread since, as an untagged value, it must be
+  // passed in a register.
+ DEFINE_JS_PARAMETERS(kSlot, kSpread, kMaybeFeedbackVector)
+ DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kSlot
+ MachineType::AnyTagged(), // kSpread
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
@@ -988,6 +1056,21 @@ class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
+class ConstructWithArrayLike_WithFeedbackDescriptor
+ : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot,
+ kMaybeFeedbackVector)
+ DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
+ MachineType::AnyTagged(), // kNewTarget
+ MachineType::AnyTagged(), // kArgumentsList
+ MachineType::Int32(), // kSlot
+ MachineType::AnyTagged()) // kMaybeFeedbackVector
+ DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
+ CallInterfaceDescriptor)
+};
+
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
@@ -1006,13 +1089,6 @@ class AbortDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
};
-class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT()
- DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
-};
-
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
@@ -1331,52 +1407,6 @@ class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
};
-class WasmTableInitDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kTableIndex,
- kSegmentIndex)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kTableIndex
- MachineType::AnyTagged(), // kSegmentindex
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableInitDescriptor, CallInterfaceDescriptor)
-};
-
-class WasmTableCopyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kDestination, kSource, kSize, kDestinationTable,
- kSourceTable)
- DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kDestination
- MachineType::Int32(), // kSource
- MachineType::Int32(), // kSize
- MachineType::AnyTagged(), // kDestinationTable
- MachineType::AnyTagged(), // kSourceTable
- )
-
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
-
- DECLARE_DESCRIPTOR(WasmTableCopyDescriptor, CallInterfaceDescriptor)
-};
-
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
: public CallInterfaceDescriptor {
public:
@@ -1414,15 +1444,6 @@ class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
};
-class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kCount)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32()) // kCount
- DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
-};
-
class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
@@ -1461,26 +1482,6 @@ class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
};
-class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Int32(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
-class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint64(), // kExpectedValue
- MachineType::Uint64()) // kTimeout
- DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
-};
-
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
@@ -1497,11 +1498,12 @@ class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
@@ -1519,11 +1521,12 @@ class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
+// TODO(jgruber): Pass the slot as UintPtr.
class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
// kSlot is passed in a register, kMaybeFeedbackVector on the stack.
@@ -1538,7 +1541,7 @@ class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
- MachineType::Int32(), // kSlot
+ MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kMaybeFeedbackVector
DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
};
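Editor's note: each *_WithFeedback descriptor added above follows the same recipe: DEFINE_PARAMETERS names the slots, DEFINE_PARAMETER_TYPES gives one MachineType per name in matching order, and DECLARE_DESCRIPTOR wires the class into the CallDescriptors registry. A hedged sketch of a descriptor in this style (the name and parameters are illustrative, not part of V8):

    class MyOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
     public:
      DEFINE_PARAMETERS(kValue, kSlot, kMaybeFeedbackVector)
      DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(),  // kValue
                             MachineType::UintPtr(),    // kSlot
                             MachineType::AnyTagged())  // kMaybeFeedbackVector
      DECLARE_DESCRIPTOR(MyOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
    };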
diff --git a/chromium/v8/src/codegen/machine-type.h b/chromium/v8/src/codegen/machine-type.h
index ea054415942..e7e10208d7b 100644
--- a/chromium/v8/src/codegen/machine-type.h
+++ b/chromium/v8/src/codegen/machine-type.h
@@ -188,50 +188,10 @@ class MachineType {
constexpr static MachineType Bool() {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
}
- constexpr static MachineType TaggedBool() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
- }
- constexpr static MachineType CompressedBool() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kBool);
- }
constexpr static MachineType None() {
return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
}
- // These naked representations should eventually go away.
- constexpr static MachineType RepWord8() {
- return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord16() {
- return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord32() {
- return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepWord64() {
- return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat32() {
- return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
- }
- constexpr static MachineType RepFloat64() {
- return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
- }
- constexpr static MachineType RepSimd128() {
- return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
- }
- constexpr static MachineType RepTagged() {
- return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
- }
- constexpr static MachineType RepCompressed() {
- return MachineType(MachineRepresentation::kCompressed,
- MachineSemantic::kNone);
- }
- constexpr static MachineType RepBit() {
- return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
- }
-
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.cc b/chromium/v8/src/codegen/mips/assembler-mips.cc
index 768b16b86c4..19a514b2d9d 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/assembler-mips.cc
@@ -3568,17 +3568,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
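Editor's note: the data directives no longer go through EmitHelper; they write the bytes and advance pc_ directly, presumably so that per-instruction bookkeeping (trampoline-pool accounting and compact-branch tracking) is not applied to raw data. The mips64 file below repeats the same change. The common shape, as a sketch:

    // Pattern shared by db/dd/dq above: raw store plus pointer bump,
    // with no instruction bookkeeping.
    template <typename T>
    void EmitRawData(byte** pc, T data) {
      *reinterpret_cast<T*>(*pc) = data;
      *pc += sizeof(T);
    }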
@@ -3652,8 +3655,12 @@ void Assembler::CheckTrampolinePool() {
}
}
}
- bind(&after_pool);
+  // If unbound_labels_count_ is big enough, the after_pool label will also
+  // need a trampoline, so we must create the trampoline before the bind
+  // operation to make sure the 'bind' function can see this
+  // information.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
@@ -3794,6 +3801,7 @@ void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
addu(t, ra, t);
jalr(t);
if (bdslot == PROTECT) nop();
+ set_last_call_pc_(pc_);
}
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.h b/chromium/v8/src/codegen/mips/assembler-mips.h
index a414168a9f3..248bd1ac751 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.h
+++ b/chromium/v8/src/codegen/mips/assembler-mips.h
@@ -170,6 +170,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // Mips uses BlockTrampolinePool to prevent generating a trampoline inside
+  // a continuous instruction block. For a Call instruction, it prevents
+  // generating a trampoline between the jalr and its delay slot instruction.
+  // The destructor of BlockTrampolinePool must check whether a trampoline
+  // needs to be generated immediately; otherwise the branch range could
+  // exceed the max branch offset, meaning the pc_offset after calling
+  // CheckTrampolinePool may not be the Call instruction's location. So we
+  // use last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK(IsMipsArchVariant(kMips32r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1593,6 +1622,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode, BranchDelaySlot bdslot);
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1856,6 +1887,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position to ensure that the safepoint
+  // record gets the correct information even if there is a trampoline
+  // immediately after the Call.
+ byte* last_call_pc_;
+
private:
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
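Editor's note: the last_call_pc_ bookkeeping added here is written by the Call emitters (see the macro-assembler hunks below) and read back by pc_offset_for_safepoint(). A condensed sketch of the round trip; CallViaT9 and the pc() accessor are hypothetical:

    // Recording side: remember where the call sequence ended.
    void CallViaT9(Assembler* masm, Register target) {
      masm->jalr(target);
      masm->nop();  // Branch delay slot (PROTECT).
      masm->set_last_call_pc_(masm->pc());  // hypothetical accessor for pc_
    }
    // Reading side: the safepoint table keys on
    // pc_offset_for_safepoint() rather than pc_offset(), which may
    // already point past a freshly emitted trampoline.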
diff --git a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc b/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
index 6770ab5cce8..c092ebc2c75 100644
--- a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ b/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -233,12 +217,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +316,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
index 48b2acf4562..efb2dc11e1f 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -3906,6 +3906,7 @@ void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
@@ -3938,6 +3939,7 @@ void TurboAssembler::Call(Register target, Register base, int16_t offset,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
@@ -5427,7 +5429,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.cc b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
index 751d0f8703e..3ec7bbb5e08 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
@@ -3763,17 +3763,20 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
}
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
- EmitHelper(data);
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
}
void Assembler::dd(Label* label) {
@@ -3856,8 +3859,12 @@ void Assembler::CheckTrampolinePool() {
}
}
nop();
- bind(&after_pool);
+  // If unbound_labels_count_ is big enough, the after_pool label will also
+  // need a trampoline, so we must create the trampoline before the bind
+  // operation to make sure the 'bind' function can see this
+  // information.
trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
trampoline_emitted_ = true;
// As we are only going to emit trampoline once, we need to prevent any
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.h b/chromium/v8/src/codegen/mips64/assembler-mips64.h
index f70e46f81b3..b5edc75676f 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64.h
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64.h
@@ -168,6 +168,35 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Unused on this architecture.
void MaybeEmitOutOfLineConstantPool() {}
+  // Mips uses BlockTrampolinePool to prevent generating a trampoline inside
+  // a continuous instruction block. For a Call instruction, it prevents
+  // generating a trampoline between the jalr and its delay slot instruction.
+  // The destructor of BlockTrampolinePool must check whether a trampoline
+  // needs to be generated immediately; otherwise the branch range could
+  // exceed the max branch offset, meaning the pc_offset after calling
+  // CheckTrampolinePool may not be the Call instruction's location. So we
+  // use last_call_pc here for the safepoint record.
+ int pc_offset_for_safepoint() {
+#ifdef DEBUG
+ Instr instr1 =
+ instr_at(static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize));
+ Instr instr2 = instr_at(
+ static_cast<int>(last_call_pc_ - buffer_start_ - kInstrSize * 2));
+ if (GetOpcodeField(instr1) != SPECIAL) { // instr1 == jialc.
+ DCHECK((kArchVariant == kMips64r6) && GetOpcodeField(instr1) == POP76 &&
+ GetRs(instr1) == 0);
+ } else {
+ if (GetFunctionField(instr1) == SLL) { // instr1 == nop, instr2 == jalr.
+ DCHECK(GetOpcodeField(instr2) == SPECIAL &&
+ GetFunctionField(instr2) == JALR);
+ } else { // instr1 == jalr.
+ DCHECK(GetFunctionField(instr1) == JALR);
+ }
+ }
+#endif
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
@@ -1629,6 +1658,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1882,6 +1913,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Trampoline trampoline_;
bool internal_trampoline_exception_;
+  // Keep track of the last Call's position to ensure that the safepoint
+  // record gets the correct information even if there is a trampoline
+  // immediately after the Call.
+ byte* last_call_pc_;
+
RegList scratch_register_list_;
private:
diff --git a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 077b49fa999..00067454f1e 100644
--- a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -39,14 +39,6 @@ void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
-void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
@@ -56,14 +48,6 @@ void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
default_stub_registers);
}
-void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
@@ -233,12 +217,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // register state
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a0};
@@ -338,6 +316,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
index a665b76e80e..785cf4aa5cc 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -4235,6 +4235,7 @@ void TurboAssembler::Call(Register target, Condition cond, Register rs,
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
+ set_last_call_pc_(pc_);
}
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
@@ -5753,7 +5754,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
- Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.cc b/chromium/v8/src/codegen/optimized-compilation-info.cc
index 19f93e674e1..286f66e252b 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.cc
+++ b/chromium/v8/src/codegen/optimized-compilation-info.cc
@@ -19,7 +19,7 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure)
+ Handle<JSFunction> closure, bool native_context_independent)
: OptimizedCompilationInfo(Code::OPTIMIZED_FUNCTION, zone) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
@@ -32,9 +32,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
if (isolate->NeedsDetailedOptimizedCodeLineInfo()) {
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
}
+ if (native_context_independent) set_native_context_independent();
SetTracingFlags(shared->PassesFilter(FLAG_trace_turbo_filter));
}
@@ -53,59 +54,82 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(Code::Kind code_kind,
ConfigureFlags();
}
+#ifdef DEBUG
+bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ return untrusted_code_mitigations();
+ default:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
+ switch (flag) {
+ case kPoisonRegisterArguments:
+ if (!GetFlag(kPoisonRegisterArguments)) return true;
+ return untrusted_code_mitigations() && called_with_code_start_register();
+ default:
+ return true;
+ }
+ UNREACHABLE();
+}
+#endif // DEBUG
+
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) SetFlag(kUntrustedCodeMitigations);
+ if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
switch (code_kind_) {
case Code::OPTIMIZED_FUNCTION:
- SetFlag(kCalledWithCodeStartRegister);
- SetFlag(kSwitchJumpTableEnabled);
+ set_called_with_code_start_register();
+ set_switch_jump_table();
if (FLAG_function_context_specialization) {
- MarkAsFunctionContextSpecializing();
+ set_function_context_specializing();
}
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
if (FLAG_untrusted_code_mitigations) {
- MarkAsPoisoningRegisterArguments();
+ set_poison_register_arguments();
}
if (FLAG_analyze_environment_liveness) {
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
- MarkAsAnalyzeEnvironmentLiveness();
+ set_analyze_environment_liveness();
}
break;
case Code::BYTECODE_HANDLER:
- SetFlag(kCalledWithCodeStartRegister);
+ set_called_with_code_start_register();
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
break;
case Code::BUILTIN:
case Code::STUB:
if (FLAG_turbo_splitting) {
- MarkAsSplittingEnabled();
+ set_splitting();
}
#if ENABLE_GDB_JIT_INTERFACE && DEBUG
- MarkAsSourcePositionsEnabled();
+ set_source_positions();
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
break;
case Code::WASM_FUNCTION:
case Code::WASM_TO_CAPI_FUNCTION:
- SetFlag(kSwitchJumpTableEnabled);
+ set_switch_jump_table();
break;
default:
break;
}
if (FLAG_turbo_control_flow_aware_allocation) {
- MarkAsTurboControlFlowAwareAllocation();
+ set_turbo_control_flow_aware_allocation();
} else {
- MarkAsTurboPreprocessRanges();
+ set_turbo_preprocess_ranges();
}
}
OptimizedCompilationInfo::~OptimizedCompilationInfo() {
- if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ if (disable_future_optimization() && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
}
@@ -134,12 +158,12 @@ void OptimizedCompilationInfo::AbortOptimization(BailoutReason reason) {
if (bailout_reason_ == BailoutReason::kNoReason) {
bailout_reason_ = reason;
}
- SetFlag(kDisableFutureOptimization);
+ set_disable_future_optimization();
}
void OptimizedCompilationInfo::RetryOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- if (GetFlag(kDisableFutureOptimization)) return;
+ if (disable_future_optimization()) return;
bailout_reason_ = reason;
}
@@ -225,11 +249,11 @@ int OptimizedCompilationInfo::AddInlinedFunction(
void OptimizedCompilationInfo::SetTracingFlags(bool passes_filter) {
if (!passes_filter) return;
- if (FLAG_trace_turbo) SetFlag(kTraceTurboJson);
- if (FLAG_trace_turbo_graph) SetFlag(kTraceTurboGraph);
- if (FLAG_trace_turbo_scheduled) SetFlag(kTraceTurboScheduled);
- if (FLAG_trace_turbo_alloc) SetFlag(kTraceTurboAllocation);
- if (FLAG_trace_heap_broker) SetFlag(kTraceHeapBroker);
+ if (FLAG_trace_turbo) set_trace_turbo_json();
+ if (FLAG_trace_turbo_graph) set_trace_turbo_graph();
+ if (FLAG_trace_turbo_scheduled) set_trace_turbo_scheduled();
+ if (FLAG_trace_turbo_alloc) set_trace_turbo_allocation();
+ if (FLAG_trace_heap_broker) set_trace_heap_broker();
}
OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
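The accessor rename above also moves the poisoning preconditions out of the individual setters and getters and into the debug-only FlagSetIsValid/FlagGetIsValid hooks. A compilable sketch of that dependency, reduced to two flags on a hypothetical holder class; the names mirror the patch but nothing here is V8's real type:

#include <cassert>
#include <cstdint>

// Simplified sketch of the debug-time flag dependency enforced above:
// poison_register_arguments may only be set once untrusted_code_mitigations
// is already on.
class FlagHolder {
 public:
  void set_untrusted_code_mitigations() { flags_ |= kMitigations; }
  void set_poison_register_arguments() {
    assert((flags_ & kMitigations) != 0);  // FlagSetIsValid equivalent.
    flags_ |= kPoison;
  }
  bool poison_register_arguments() const { return (flags_ & kPoison) != 0; }

 private:
  enum : uint32_t { kMitigations = 1 << 0, kPoison = 1 << 1 };
  uint32_t flags_ = 0;
};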
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.h b/chromium/v8/src/codegen/optimized-compilation-info.h
index d6d4c88c990..6a5b5631ba2 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.h
+++ b/chromium/v8/src/codegen/optimized-compilation-info.h
@@ -11,6 +11,7 @@
#include "src/codegen/source-position-table.h"
#include "src/codegen/tick-counter.h"
#include "src/common/globals.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/frames.h"
#include "src/handles/handles.h"
#include "src/objects/objects.h"
@@ -43,35 +44,64 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
+
+#define FLAGS(V) \
+ V(FunctionContextSpecializing, function_context_specializing, 0) \
+ V(Inlining, inlining, 1) \
+ V(DisableFutureOptimization, disable_future_optimization, 2) \
+ V(Splitting, splitting, 3) \
+ V(SourcePositions, source_positions, 4) \
+ V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
+ V(LoopPeeling, loop_peeling, 6) \
+ V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
+ V(SwitchJumpTable, switch_jump_table, 8) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
+ V(PoisonRegisterArguments, poison_register_arguments, 10) \
+ V(AllocationFolding, allocation_folding, 11) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
+ V(TraceTurboJson, trace_turbo_json, 13) \
+ V(TraceTurboGraph, trace_turbo_graph, 14) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 16) \
+ V(TraceHeapBroker, trace_heap_broker, 17) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
+ V(TurboControlFlowAwareAllocation, turbo_control_flow_aware_allocation, 19) \
+ V(TurboPreprocessRanges, turbo_preprocess_ranges, 20) \
+ V(ConcurrentInlining, concurrent_inlining, 21) \
+ V(NativeContextIndependent, native_context_independent, 22)
+
enum Flag {
- kFunctionContextSpecializing = 1 << 0,
- kInliningEnabled = 1 << 1,
- kDisableFutureOptimization = 1 << 2,
- kSplittingEnabled = 1 << 3,
- kSourcePositionsEnabled = 1 << 4,
- kBailoutOnUninitialized = 1 << 5,
- kLoopPeelingEnabled = 1 << 6,
- kUntrustedCodeMitigations = 1 << 7,
- kSwitchJumpTableEnabled = 1 << 8,
- kCalledWithCodeStartRegister = 1 << 9,
- kPoisonRegisterArguments = 1 << 10,
- kAllocationFoldingEnabled = 1 << 11,
- kAnalyzeEnvironmentLiveness = 1 << 12,
- kTraceTurboJson = 1 << 13,
- kTraceTurboGraph = 1 << 14,
- kTraceTurboScheduled = 1 << 15,
- kTraceTurboAllocation = 1 << 16,
- kTraceHeapBroker = 1 << 17,
- kWasmRuntimeExceptionSupport = 1 << 18,
- kTurboControlFlowAwareAllocation = 1 << 19,
- kTurboPreprocessRanges = 1 << 20,
- kConcurrentInlining = 1 << 21,
+#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
+ FLAGS(DEF_ENUM)
+#undef DEF_ENUM
};
+#define DEF_GETTER(Camel, Lower, Bit) \
+ bool Lower() const { \
+ DCHECK(FlagGetIsValid(k##Camel)); \
+ return GetFlag(k##Camel); \
+ }
+ FLAGS(DEF_GETTER)
+#undef DEF_GETTER
+
+#define DEF_SETTER(Camel, Lower, Bit) \
+ void set_##Lower() { \
+ DCHECK(FlagSetIsValid(k##Camel)); \
+ SetFlag(k##Camel); \
+ }
+ FLAGS(DEF_SETTER)
+#undef DEF_SETTER
+
+#ifdef DEBUG
+ bool FlagGetIsValid(Flag flag) const;
+ bool FlagSetIsValid(Flag flag) const;
+#endif // DEBUG
+
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure);
+ Handle<JSFunction> closure,
+ bool native_context_independent);
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
Code::Kind code_kind);
@@ -92,38 +122,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
- // Flags used by optimized compilation.
-
- void MarkAsConcurrentInlining() { SetFlag(kConcurrentInlining); }
- bool is_concurrent_inlining() const { return GetFlag(kConcurrentInlining); }
-
- void MarkAsTurboControlFlowAwareAllocation() {
- SetFlag(kTurboControlFlowAwareAllocation);
- }
- bool is_turbo_control_flow_aware_allocation() const {
- return GetFlag(kTurboControlFlowAwareAllocation);
- }
-
- void MarkAsTurboPreprocessRanges() { SetFlag(kTurboPreprocessRanges); }
- bool is_turbo_preprocess_ranges() const {
- return GetFlag(kTurboPreprocessRanges);
- }
-
- void MarkAsFunctionContextSpecializing() {
- SetFlag(kFunctionContextSpecializing);
- }
- bool is_function_context_specializing() const {
- return GetFlag(kFunctionContextSpecializing);
- }
-
- void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
- bool is_source_positions_enabled() const {
- return GetFlag(kSourcePositionsEnabled);
- }
-
- void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
- bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
-
void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
poisoning_level_ = poisoning_level;
}
@@ -131,75 +129,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
return poisoning_level_;
}
- void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
- bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
-
- void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
- bool is_bailout_on_uninitialized() const {
- return GetFlag(kBailoutOnUninitialized);
- }
-
- void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
- bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
-
- bool has_untrusted_code_mitigations() const {
- return GetFlag(kUntrustedCodeMitigations);
- }
-
- bool switch_jump_table_enabled() const {
- return GetFlag(kSwitchJumpTableEnabled);
- }
-
- bool called_with_code_start_register() const {
- bool enabled = GetFlag(kCalledWithCodeStartRegister);
- return enabled;
- }
-
- void MarkAsPoisoningRegisterArguments() {
- DCHECK(has_untrusted_code_mitigations());
- SetFlag(kPoisonRegisterArguments);
- }
- bool is_poisoning_register_arguments() const {
- bool enabled = GetFlag(kPoisonRegisterArguments);
- DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
- DCHECK_IMPLIES(enabled, called_with_code_start_register());
- return enabled;
- }
-
- void MarkAsAllocationFoldingEnabled() { SetFlag(kAllocationFoldingEnabled); }
- bool is_allocation_folding_enabled() const {
- return GetFlag(kAllocationFoldingEnabled);
- }
-
- void MarkAsAnalyzeEnvironmentLiveness() {
- SetFlag(kAnalyzeEnvironmentLiveness);
- }
- bool is_analyze_environment_liveness() const {
- return GetFlag(kAnalyzeEnvironmentLiveness);
- }
-
- void SetWasmRuntimeExceptionSupport() {
- SetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool wasm_runtime_exception_support() {
- return GetFlag(kWasmRuntimeExceptionSupport);
- }
-
- bool trace_turbo_json_enabled() const { return GetFlag(kTraceTurboJson); }
-
- bool trace_turbo_graph_enabled() const { return GetFlag(kTraceTurboGraph); }
-
- bool trace_turbo_allocation_enabled() const {
- return GetFlag(kTraceTurboAllocation);
- }
-
- bool trace_turbo_scheduled_enabled() const {
- return GetFlag(kTraceTurboScheduled);
- }
-
- bool trace_heap_broker_enabled() const { return GetFlag(kTraceHeapBroker); }
-
// Code getters and setters.
void SetCode(Handle<Code> code) { code_ = code; }
@@ -239,10 +168,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
BailoutReason bailout_reason() const { return bailout_reason_; }
- bool is_disable_future_optimization() const {
- return GetFlag(kDisableFutureOptimization);
- }
-
int optimization_id() const {
DCHECK(IsOptimizing());
return optimization_id_;
@@ -290,6 +215,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
TickCounter& tick_counter() { return tick_counter_; }
+ BasicBlockProfilerData* profiler_data() const { return profiler_data_; }
+ void set_profiler_data(BasicBlockProfilerData* profiler_data) {
+ profiler_data_ = profiler_data;
+ }
+
private:
OptimizedCompilationInfo(Code::Kind code_kind, Zone* zone);
void ConfigureFlags();
@@ -318,6 +248,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// The compiled code.
Handle<Code> code_;
+ // Basic block profiling support.
+ BasicBlockProfilerData* profiler_data_ = nullptr;
+
// The WebAssembly compilation result, not published in the NativeModule yet.
std::unique_ptr<wasm::WasmCompilationResult> wasm_compilation_result_;
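The FLAGS X-macro above generates the enum bits, the getters, and the setters from one list, so adding a flag is a single list entry rather than three hand-written members. A self-contained sketch of the same technique with a hypothetical two-entry list:

// Standalone sketch of the X-macro pattern used above: one list expands
// into the enum bits, the getters, and the setters. The flag names here
// are placeholders.
#define MY_FLAGS(V)        \
  V(Inlining, inlining, 0) \
  V(Splitting, splitting, 1)

class Info {
 public:
  enum Flag {
#define DEF_ENUM(Camel, lower, Bit) k##Camel = 1 << Bit,
    MY_FLAGS(DEF_ENUM)
#undef DEF_ENUM
  };
#define DEF_ACCESSORS(Camel, lower, Bit)                  \
  bool lower() const { return (flags_ & k##Camel) != 0; } \
  void set_##lower() { flags_ |= k##Camel; }
  MY_FLAGS(DEF_ACCESSORS)
#undef DEF_ACCESSORS

 private:
  unsigned flags_ = 0;
};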
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.cc b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
index b9f09e23f23..62e33bba369 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
@@ -1758,31 +1758,21 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
}
// Vector instructions
-void Assembler::mfvsrd(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mfvsrwz(const Register ra, const DoubleRegister rs) {
+void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
int SX = 1;
emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}
-void Assembler::mtvsrd(const DoubleRegister rt, const Register ra) {
+void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
int TX = 1;
emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
-void Assembler::vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VOR | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
-void Assembler::vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb) {
- emit(VSRO | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
-}
-
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
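The mfvsrd/mfvsrwz/mtvsrd signatures change to Simd128Register, but the emission stays the usual PPC field packing: each register code is multiplied into its bit position (Bn is a shift by n bits) and ORed with the opcode, plus the SX/TX bit selecting the high half of the VSR file. A sketch of that packing; the MFVSRD opcode value is taken from the ISA tables and is an assumption, not part of this hunk:

#include <cstdint>

// Sketch of the field packing behind emit(MFVSRD | rs.code() * B21 | ...).
constexpr uint32_t B16 = 1u << 16;
constexpr uint32_t B21 = 1u << 21;
constexpr uint32_t MFVSRD = 0x7C000066;  // assumed ISA value, illustrative

uint32_t EncodeMfvsrd(int ra_code, int vs_code) {
  const uint32_t SX = 1;  // select the high (VR) half of the VSR file
  return MFVSRD | static_cast<uint32_t>(vs_code) * B21 |
         static_cast<uint32_t>(ra_code) * B16 | SX;
}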
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.h b/chromium/v8/src/codegen/ppc/assembler-ppc.h
index 778e94c1859..d8f1d8ef20d 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.h
@@ -435,9 +435,10 @@ class Assembler : public AssemblerBase {
inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
DoubleRegister b) {
- int AX = ((a.code() & 0x20) >> 5) & 0x1;
- int BX = ((b.code() & 0x20) >> 5) & 0x1;
- int TX = ((t.code() & 0x20) >> 5) & 0x1;
+ // Using VR (high VSR) registers.
+ int AX = 1;
+ int BX = 1;
+ int TX = 1;
emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
(b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
@@ -447,18 +448,59 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_XX3_INSTRUCTIONS
#define DECLARE_PPC_VX_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
- inline void name(const DoubleRegister rt, const DoubleRegister rb, \
+ inline void name(const Simd128Register rt, const Simd128Register rb, \
const Operand& imm) { \
vx_form(instr_name, rt, rb, imm); \
}
+#define DECLARE_PPC_VX_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb) { \
+ vx_form(instr_name, rt, ra, rb); \
+ }
- inline void vx_form(Instr instr, DoubleRegister rt, DoubleRegister rb,
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register rb,
const Operand& imm) {
emit(instr | rt.code() * B21 | imm.immediate() * B16 | rb.code() * B11);
}
+ inline void vx_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
+ }
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
+ PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
+#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
+
+#define DECLARE_PPC_VA_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const Simd128Register rc) { \
+ va_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void va_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, Simd128Register rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc.code() * B6);
+ }
+
+ PPC_VA_OPCODE_A_FORM_LIST(DECLARE_PPC_VA_INSTRUCTIONS_A_FORM)
+#undef DECLARE_PPC_VA_INSTRUCTIONS_A_FORM
+
+#define DECLARE_PPC_VC_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register ra, \
+ const Simd128Register rb, const RCBit rc = LeaveRC) { \
+ vc_form(instr_name, rt, ra, rb, rc); \
+ }
+
+ inline void vc_form(Instr instr, Simd128Register rt, Simd128Register ra,
+ Simd128Register rb, int rc) {
+ emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+ rc * B10);
+ }
+
+ PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
+#undef DECLARE_PPC_VC_INSTRUCTIONS
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
@@ -947,13 +989,9 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
// Vector instructions
- void mfvsrd(const Register ra, const DoubleRegister r);
- void mfvsrwz(const Register ra, const DoubleRegister r);
- void mtvsrd(const DoubleRegister rt, const Register ra);
- void vor(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
- void vsro(const DoubleRegister rt, const DoubleRegister ra,
- const DoubleRegister rb);
+ void mfvsrd(const Register ra, const Simd128Register r);
+ void mfvsrwz(const Register ra, const Simd128Register r);
+ void mtvsrd(const Simd128Register rt, const Register ra);
// Pseudo instructions
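The new VA-form and VC-form declaration macros stamp out one inline emitter per entry of the corresponding opcode list; va_form packs four register fields (rc lands at B6) and vc_form adds the record bit at B10. A sketch of what a single expansion reduces to, with plain ints standing in for Simd128Register and the encoded word returned instead of emitted:

#include <cstdint>

// Sketch of one DECLARE_PPC_VA_INSTRUCTIONS_A_FORM expansion.
constexpr uint32_t B6 = 1u << 6, B11 = 1u << 11, B16 = 1u << 16,
                   B21 = 1u << 21;
constexpr uint32_t VPERM = 0x1000002B;  // from PPC_VA_OPCODE_A_FORM_LIST

uint32_t va_form(uint32_t instr, int rt, int ra, int rb, int rc) {
  return instr | rt * B21 | ra * B16 | rb * B11 | rc * B6;
}

// The macro-generated vperm(rt, ra, rb, rc) is then just:
uint32_t vperm(int rt, int ra, int rb, int rc) {
  return va_form(VPERM, rt, ra, rb, rc);
}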
diff --git a/chromium/v8/src/codegen/ppc/constants-ppc.h b/chromium/v8/src/codegen/ppc/constants-ppc.h
index b75c3e32576..f784bef54b8 100644
--- a/chromium/v8/src/codegen/ppc/constants-ppc.h
+++ b/chromium/v8/src/codegen/ppc/constants-ppc.h
@@ -1707,8 +1707,6 @@ using Instr = uint32_t;
V(stvewx, STVEWX, 0x7C00018E) \
/* Store Vector Indexed Last */ \
V(stvxl, STVXL, 0x7C0003CE) \
- /* Vector Minimum Signed Doubleword */ \
- V(vminsd, VMINSD, 0x100003C2) \
/* Floating Merge Even Word */ \
V(fmrgew, FMRGEW, 0xFC00078C) \
/* Floating Merge Odd Word */ \
@@ -1920,7 +1918,15 @@ using Instr = uint32_t;
/* Floating Reciprocal Square Root Estimate Single */ \
V(frsqrtes, FRSQRTES, 0xEC000034)
-#define PPC_VA_OPCODE_LIST(V) \
+#define PPC_VA_OPCODE_A_FORM_LIST(V) \
+ /* Vector Permute */ \
+ V(vperm, VPERM, 0x1000002B) \
+ /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
+ V(vmladduhm, VMLADDUHM, 0x10000022) \
+ /* Vector Select */ \
+ V(vsel, VSEL, 0x1000002A)
+
+#define PPC_VA_OPCODE_UNUSED_LIST(V) \
/* Vector Add Extended & write Carry Unsigned Quadword */ \
V(vaddecuq, VADDECUQ, 0x1000003D) \
/* Vector Add Extended Unsigned Quadword Modulo */ \
@@ -1931,8 +1937,6 @@ using Instr = uint32_t;
V(vmhaddshs, VMHADDSHS, 0x10000020) \
/* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
V(vmhraddshs, VMHRADDSHS, 0x10000021) \
- /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
- V(vmladduhm, VMLADDUHM, 0x10000022) \
/* Vector Multiply-Sum Mixed Byte Modulo */ \
V(vmsummbm, VMSUMMBM, 0x10000025) \
/* Vector Multiply-Sum Signed Halfword Modulo */ \
@@ -1947,10 +1951,6 @@ using Instr = uint32_t;
V(vmsumuhs, VMSUMUHS, 0x10000027) \
/* Vector Negative Multiply-Subtract Single-Precision */ \
V(vnmsubfp, VNMSUBFP, 0x1000002F) \
- /* Vector Permute */ \
- V(vperm, VPERM, 0x1000002B) \
- /* Vector Select */ \
- V(vsel, VSEL, 0x1000002A) \
/* Vector Shift Left Double by Octet Immediate */ \
V(vsldoi, VSLDOI, 0x1000002C) \
/* Vector Subtract Extended & write Carry Unsigned Quadword */ \
@@ -1960,6 +1960,10 @@ using Instr = uint32_t;
/* Vector Permute and Exclusive-OR */ \
V(vpermxor, VPERMXOR, 0x1000002D)
+#define PPC_VA_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_A_FORM_LIST(V) \
+ PPC_VA_OPCODE_UNUSED_LIST(V)
+
#define PPC_XX1_OPCODE_LIST(V) \
/* Load VSR Scalar Doubleword Indexed */ \
V(lxsdx, LXSDX, 0x7C000498) \
@@ -2200,6 +2204,112 @@ using Instr = uint32_t;
/* Vector Splat Halfword */ \
V(vsplth, VSPLTH, 0x1000024C)
+#define PPC_VX_OPCODE_B_FORM_LIST(V) \
+ /* Vector Logical OR */ \
+ V(vor, VOR, 0x10000484) \
+ /* Vector Logical XOR */ \
+ V(vxor, VXOR, 0x100004C4) \
+ /* Vector Logical NOR */ \
+ V(vnor, VNOR, 0x10000504) \
+ /* Vector Shift Right by Octet */ \
+ V(vsro, VSRO, 0x1000044C) \
+ /* Vector Shift Left by Octet */ \
+ V(vslo, VSLO, 0x1000040C) \
+ /* Vector Add Unsigned Doubleword Modulo */ \
+ V(vaddudm, VADDUDM, 0x100000C0) \
+ /* Vector Add Unsigned Word Modulo */ \
+ V(vadduwm, VADDUWM, 0x10000080) \
+ /* Vector Add Unsigned Halfword Modulo */ \
+ V(vadduhm, VADDUHM, 0x10000040) \
+ /* Vector Add Unsigned Byte Modulo */ \
+ V(vaddubm, VADDUBM, 0x10000000) \
+ /* Vector Add Single-Precision */ \
+ V(vaddfp, VADDFP, 0x1000000A) \
+ /* Vector Subtract Single-Precision */ \
+ V(vsubfp, VSUBFP, 0x1000004A) \
+ /* Vector Subtract Unsigned Doubleword Modulo */ \
+ V(vsubudm, VSUBUDM, 0x100004C0) \
+ /* Vector Subtract Unsigned Word Modulo */ \
+ V(vsubuwm, VSUBUWM, 0x10000480) \
+ /* Vector Subtract Unsigned Halfword Modulo */ \
+ V(vsubuhm, VSUBUHM, 0x10000440) \
+ /* Vector Subtract Unsigned Byte Modulo */ \
+ V(vsububm, VSUBUBM, 0x10000400) \
+ /* Vector Multiply Unsigned Word Modulo */ \
+ V(vmuluwm, VMULUWM, 0x10000089) \
+ /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
+ V(vpkuhum, VPKUHUM, 0x1000000E) \
+ /* Vector Multiply Even Unsigned Byte */ \
+ V(vmuleub, VMULEUB, 0x10000208) \
+ /* Vector Multiply Odd Unsigned Byte */ \
+ V(vmuloub, VMULOUB, 0x10000008) \
+ /* Vector Sum across Quarter Signed Halfword Saturate */ \
+ V(vsum4shs, VSUM4SHS, 0x10000648) \
+ /* Vector Pack Unsigned Word Unsigned Saturate */ \
+ V(vpkuwus, VPKUWUS, 0x100000CE) \
+ /* Vector Sum across Half Signed Word Saturate */ \
+ V(vsum2sws, VSUM2SWS, 0x10000688) \
+ /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
+ V(vpkudum, VPKUDUM, 0x1000044E) \
+ /* Vector Maximum Signed Byte */ \
+ V(vmaxsb, VMAXSB, 0x10000102) \
+ /* Vector Maximum Unsigned Byte */ \
+ V(vmaxub, VMAXUB, 0x10000002) \
+ /* Vector Maximum Signed Doubleword */ \
+ V(vmaxsd, VMAXSD, 0x100001C2) \
+ /* Vector Maximum Unsigned Doubleword */ \
+ V(vmaxud, VMAXUD, 0x100000C2) \
+ /* Vector Maximum Signed Halfword */ \
+ V(vmaxsh, VMAXSH, 0x10000142) \
+ /* Vector Maximum Unsigned Halfword */ \
+ V(vmaxuh, VMAXUH, 0x10000042) \
+ /* Vector Maximum Signed Word */ \
+ V(vmaxsw, VMAXSW, 0x10000182) \
+ /* Vector Maximum Unsigned Word */ \
+ V(vmaxuw, VMAXUW, 0x10000082) \
+ /* Vector Minimum Signed Byte */ \
+ V(vminsb, VMINSB, 0x10000302) \
+ /* Vector Minimum Unsigned Byte */ \
+ V(vminub, VMINUB, 0x10000202) \
+ /* Vector Minimum Signed Doubleword */ \
+ V(vminsd, VMINSD, 0x100003C2) \
+ /* Vector Minimum Unsigned Doubleword */ \
+ V(vminud, VMINUD, 0x100002C2) \
+ /* Vector Minimum Signed Halfword */ \
+ V(vminsh, VMINSH, 0x10000342) \
+ /* Vector Minimum Unsigned Halfword */ \
+ V(vminuh, VMINUH, 0x10000242) \
+ /* Vector Minimum Signed Word */ \
+ V(vminsw, VMINSW, 0x10000382) \
+ /* Vector Minimum Unsigned Word */ \
+ V(vminuw, VMINUW, 0x10000282) \
+ /* Vector Shift Left Byte */ \
+ V(vslb, VSLB, 0x10000104) \
+ /* Vector Shift Left Word */ \
+ V(vslw, VSLW, 0x10000184) \
+ /* Vector Shift Left Halfword */ \
+ V(vslh, VSLH, 0x10000144) \
+ /* Vector Shift Left Doubleword */ \
+ V(vsld, VSLD, 0x100005C4) \
+ /* Vector Shift Right Byte */ \
+ V(vsrb, VSRB, 0x10000204) \
+ /* Vector Shift Right Word */ \
+ V(vsrw, VSRW, 0x10000284) \
+ /* Vector Shift Right Halfword */ \
+ V(vsrh, VSRH, 0x10000244) \
+ /* Vector Shift Right Doubleword */ \
+ V(vsrd, VSRD, 0x100006C4) \
+ /* Vector Shift Right Algebraic Byte */ \
+ V(vsrab, VSRAB, 0x10000304) \
+ /* Vector Shift Right Algebraic Word */ \
+ V(vsraw, VSRAW, 0x10000384) \
+ /* Vector Shift Right Algebraic Halfword */ \
+ V(vsrah, VSRAH, 0x10000344) \
+ /* Vector Shift Right Algebraic Doubleword */ \
+ V(vsrad, VSRAD, 0x100003C4) \
+ /* Vector Logical AND */ \
+ V(vand, VAND, 0x10000404)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2213,32 +2323,20 @@ using Instr = uint32_t;
V(vaddcuq, VADDCUQ, 0x10000140) \
/* Vector Add and Write Carry-Out Unsigned Word */ \
V(vaddcuw, VADDCUW, 0x10000180) \
- /* Vector Add Single-Precision */ \
- V(vaddfp, VADDFP, 0x1000000A) \
/* Vector Add Signed Byte Saturate */ \
V(vaddsbs, VADDSBS, 0x10000300) \
/* Vector Add Signed Halfword Saturate */ \
V(vaddshs, VADDSHS, 0x10000340) \
/* Vector Add Signed Word Saturate */ \
V(vaddsws, VADDSWS, 0x10000380) \
- /* Vector Add Unsigned Byte Modulo */ \
- V(vaddubm, VADDUBM, 0x10000000) \
/* Vector Add Unsigned Byte Saturate */ \
V(vaddubs, VADDUBS, 0x10000200) \
- /* Vector Add Unsigned Doubleword Modulo */ \
- V(vaddudm, VADDUDM, 0x100000C0) \
- /* Vector Add Unsigned Halfword Modulo */ \
- V(vadduhm, VADDUHM, 0x10000040) \
/* Vector Add Unsigned Halfword Saturate */ \
V(vadduhs, VADDUHS, 0x10000240) \
/* Vector Add Unsigned Quadword Modulo */ \
V(vadduqm, VADDUQM, 0x10000100) \
- /* Vector Add Unsigned Word Modulo */ \
- V(vadduwm, VADDUWM, 0x10000080) \
/* Vector Add Unsigned Word Saturate */ \
V(vadduws, VADDUWS, 0x10000280) \
- /* Vector Logical AND */ \
- V(vand, VAND, 0x10000404) \
/* Vector Logical AND with Complement */ \
V(vandc, VANDC, 0x10000444) \
/* Vector Average Signed Byte */ \
@@ -2283,38 +2381,8 @@ using Instr = uint32_t;
V(vlogefp, VLOGEFP, 0x100001CA) \
/* Vector Maximum Single-Precision */ \
V(vmaxfp, VMAXFP, 0x1000040A) \
- /* Vector Maximum Signed Byte */ \
- V(vmaxsb, VMAXSB, 0x10000102) \
- /* Vector Maximum Signed Doubleword */ \
- V(vmaxsd, VMAXSD, 0x100001C2) \
- /* Vector Maximum Signed Halfword */ \
- V(vmaxsh, VMAXSH, 0x10000142) \
- /* Vector Maximum Signed Word */ \
- V(vmaxsw, VMAXSW, 0x10000182) \
- /* Vector Maximum Unsigned Byte */ \
- V(vmaxub, VMAXUB, 0x10000002) \
- /* Vector Maximum Unsigned Doubleword */ \
- V(vmaxud, VMAXUD, 0x100000C2) \
- /* Vector Maximum Unsigned Halfword */ \
- V(vmaxuh, VMAXUH, 0x10000042) \
- /* Vector Maximum Unsigned Word */ \
- V(vmaxuw, VMAXUW, 0x10000082) \
/* Vector Minimum Single-Precision */ \
V(vminfp, VMINFP, 0x1000044A) \
- /* Vector Minimum Signed Byte */ \
- V(vminsb, VMINSB, 0x10000302) \
- /* Vector Minimum Signed Halfword */ \
- V(vminsh, VMINSH, 0x10000342) \
- /* Vector Minimum Signed Word */ \
- V(vminsw, VMINSW, 0x10000382) \
- /* Vector Minimum Unsigned Byte */ \
- V(vminub, VMINUB, 0x10000202) \
- /* Vector Minimum Unsigned Doubleword */ \
- V(vminud, VMINUD, 0x100002C2) \
- /* Vector Minimum Unsigned Halfword */ \
- V(vminuh, VMINUH, 0x10000242) \
- /* Vector Minimum Unsigned Word */ \
- V(vminuw, VMINUW, 0x10000282) \
/* Vector Merge High Byte */ \
V(vmrghb, VMRGHB, 0x1000000C) \
/* Vector Merge High Halfword */ \
@@ -2333,8 +2401,6 @@ using Instr = uint32_t;
V(vmulesh, VMULESH, 0x10000348) \
/* Vector Multiply Even Signed Word */ \
V(vmulesw, VMULESW, 0x10000388) \
- /* Vector Multiply Even Unsigned Byte */ \
- V(vmuleub, VMULEUB, 0x10000208) \
/* Vector Multiply Even Unsigned Halfword */ \
V(vmuleuh, VMULEUH, 0x10000248) \
/* Vector Multiply Even Unsigned Word */ \
@@ -2345,20 +2411,12 @@ using Instr = uint32_t;
V(vmulosh, VMULOSH, 0x10000148) \
/* Vector Multiply Odd Signed Word */ \
V(vmulosw, VMULOSW, 0x10000188) \
- /* Vector Multiply Odd Unsigned Byte */ \
- V(vmuloub, VMULOUB, 0x10000008) \
/* Vector Multiply Odd Unsigned Halfword */ \
V(vmulouh, VMULOUH, 0x10000048) \
/* Vector Multiply Odd Unsigned Word */ \
V(vmulouw, VMULOUW, 0x10000088) \
- /* Vector Multiply Unsigned Word Modulo */ \
- V(vmuluwm, VMULUWM, 0x10000089) \
/* Vector NAND */ \
V(vnand, VNAND, 0x10000584) \
- /* Vector Logical NOR */ \
- V(vnor, VNOR, 0x10000504) \
- /* Vector Logical OR */ \
- V(vor, VOR, 0x10000484) \
/* Vector OR with Complement */ \
V(vorc, VORC, 0x10000544) \
/* Vector Pack Pixel */ \
@@ -2375,18 +2433,12 @@ using Instr = uint32_t;
V(vpkswss, VPKSWSS, 0x100001CE) \
/* Vector Pack Signed Word Unsigned Saturate */ \
V(vpkswus, VPKSWUS, 0x1000014E) \
- /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
- V(vpkudum, VPKUDUM, 0x1000044E) \
/* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
V(vpkudus, VPKUDUS, 0x100004CE) \
- /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
- V(vpkuhum, VPKUHUM, 0x1000000E) \
/* Vector Pack Unsigned Halfword Unsigned Saturate */ \
V(vpkuhus, VPKUHUS, 0x1000008E) \
/* Vector Pack Unsigned Word Unsigned Modulo */ \
V(vpkuwum, VPKUWUM, 0x1000004E) \
- /* Vector Pack Unsigned Word Unsigned Saturate */ \
- V(vpkuwus, VPKUWUS, 0x100000CE) \
/* Vector Polynomial Multiply-Sum Byte */ \
V(vpmsumb, VPMSUMB, 0x10000408) \
/* Vector Polynomial Multiply-Sum Doubleword */ \
@@ -2425,16 +2477,6 @@ using Instr = uint32_t;
V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
/* Vector Shift Left */ \
V(vsl, VSL, 0x100001C4) \
- /* Vector Shift Left Byte */ \
- V(vslb, VSLB, 0x10000104) \
- /* Vector Shift Left Doubleword */ \
- V(vsld, VSLD, 0x100005C4) \
- /* Vector Shift Left Halfword */ \
- V(vslh, VSLH, 0x10000144) \
- /* Vector Shift Left by Octet */ \
- V(vslo, VSLO, 0x1000040C) \
- /* Vector Shift Left Word */ \
- V(vslw, VSLW, 0x10000184) \
/* Vector Splat Immediate Signed Byte */ \
V(vspltisb, VSPLTISB, 0x1000030C) \
/* Vector Splat Immediate Signed Halfword */ \
@@ -2443,58 +2485,26 @@ using Instr = uint32_t;
V(vspltisw, VSPLTISW, 0x1000038C) \
/* Vector Shift Right */ \
V(vsr, VSR, 0x100002C4) \
- /* Vector Shift Right Algebraic Byte */ \
- V(vsrab, VSRAB, 0x10000304) \
- /* Vector Shift Right Algebraic Doubleword */ \
- V(vsrad, VSRAD, 0x100003C4) \
- /* Vector Shift Right Algebraic Halfword */ \
- V(vsrah, VSRAH, 0x10000344) \
- /* Vector Shift Right Algebraic Word */ \
- V(vsraw, VSRAW, 0x10000384) \
- /* Vector Shift Right Byte */ \
- V(vsrb, VSRB, 0x10000204) \
- /* Vector Shift Right Doubleword */ \
- V(vsrd, VSRD, 0x100006C4) \
- /* Vector Shift Right Halfword */ \
- V(vsrh, VSRH, 0x10000244) \
- /* Vector Shift Right by Octet */ \
- V(vsro, VSRO, 0x1000044C) \
- /* Vector Shift Right Word */ \
- V(vsrw, VSRW, 0x10000284) \
/* Vector Subtract & write Carry Unsigned Quadword */ \
V(vsubcuq, VSUBCUQ, 0x10000540) \
/* Vector Subtract and Write Carry-Out Unsigned Word */ \
V(vsubcuw, VSUBCUW, 0x10000580) \
- /* Vector Subtract Single-Precision */ \
- V(vsubfp, VSUBFP, 0x1000004A) \
/* Vector Subtract Signed Byte Saturate */ \
V(vsubsbs, VSUBSBS, 0x10000700) \
/* Vector Subtract Signed Halfword Saturate */ \
V(vsubshs, VSUBSHS, 0x10000740) \
/* Vector Subtract Signed Word Saturate */ \
V(vsubsws, VSUBSWS, 0x10000780) \
- /* Vector Subtract Unsigned Byte Modulo */ \
- V(vsububm, VSUBUBM, 0x10000400) \
/* Vector Subtract Unsigned Byte Saturate */ \
V(vsububs, VSUBUBS, 0x10000600) \
- /* Vector Subtract Unsigned Doubleword Modulo */ \
- V(vsubudm, VSUBUDM, 0x100004C0) \
- /* Vector Subtract Unsigned Halfword Modulo */ \
- V(vsubuhm, VSUBUHM, 0x10000440) \
/* Vector Subtract Unsigned Halfword Saturate */ \
V(vsubuhs, VSUBUHS, 0x10000640) \
/* Vector Subtract Unsigned Quadword Modulo */ \
V(vsubuqm, VSUBUQM, 0x10000500) \
- /* Vector Subtract Unsigned Word Modulo */ \
- V(vsubuwm, VSUBUWM, 0x10000480) \
/* Vector Subtract Unsigned Word Saturate */ \
V(vsubuws, VSUBUWS, 0x10000680) \
- /* Vector Sum across Half Signed Word Saturate */ \
- V(vsum2sws, VSUM2SWS, 0x10000688) \
/* Vector Sum across Quarter Signed Byte Saturate */ \
V(vsum4sbs, VSUM4SBS, 0x10000708) \
- /* Vector Sum across Quarter Signed Halfword Saturate */ \
- V(vsum4shs, VSUM4SHS, 0x10000648) \
/* Vector Sum across Quarter Unsigned Byte Saturate */ \
V(vsum4bus, VSUM4BUS, 0x10000608) \
/* Vector Sum across Signed Word Saturate */ \
@@ -2515,8 +2525,6 @@ using Instr = uint32_t;
V(vupklsh, VUPKLSH, 0x100002CE) \
/* Vector Unpack Low Signed Word */ \
V(vupklsw, VUPKLSW, 0x100006CE) \
- /* Vector Logical XOR */ \
- V(vxor, VXOR, 0x100004C4) \
/* Vector AES Cipher */ \
V(vcipher, VCIPHER, 0x10000508) \
/* Vector AES Cipher Last */ \
@@ -2538,6 +2546,7 @@ using Instr = uint32_t;
#define PPC_VX_OPCODE_LIST(V) \
PPC_VX_OPCODE_A_FORM_LIST(V) \
+ PPC_VX_OPCODE_B_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
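PPC_VX_OPCODE_LIST is now the concatenation of the A-form, B-form, and unused sublists, so any consumer that expands the full list (a disassembler switch, for instance) picks up the relocated entries without changes. A small sketch of consuming a composed X-macro list, using two of the opcodes from the B-form list above:

#include <cstdint>
#include <cstdio>

// Sketch of consuming a composed opcode X-macro list. The two sublists
// here are tiny stand-ins for the real A-form/B-form lists.
#define SUB_A(V) V(vor, VOR, 0x10000484)
#define SUB_B(V) V(vxor, VXOR, 0x100004C4)
#define ALL(V) SUB_A(V) SUB_B(V)

const char* MnemonicFor(uint32_t opcode) {
  switch (opcode) {
#define CASE(name, NAME, value) \
  case value:                   \
    return #name;
    ALL(CASE)
#undef CASE
    default:
      return "unknown";
  }
}

int main() { std::printf("%s\n", MnemonicFor(0x100004C4)); }  // prints vxor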
#define PPC_XS_OPCODE_LIST(V) \
diff --git a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index cd0ab1a3281..65f574d1b30 100644
--- a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
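Each *_WithFeedback descriptor added here falls back to DefaultInitializePlatformSpecific(data, 4), meaning four parameters in the platform's default argument-register order, until the PPC port assigns dedicated registers the way the x64 file later in this diff does. A simplified sketch of the two initialization shapes; the types and register codes are hypothetical:

#include <cstddef>

// Sketch of explicit versus default descriptor initialization.
struct DescriptorData {
  const int* regs = nullptr;
  size_t count = 0;
  void Initialize(size_t n, const int* r) { regs = r; count = n; }
};

constexpr int kDefaultArgRegs[] = {3, 4, 5, 6, 7};  // assumed ABI order

void DefaultInitialize(DescriptorData* data, size_t n) {
  data->Initialize(n, kDefaultArgRegs);  // the TODO-style fallback
}

void ExplicitInitialize(DescriptorData* data) {
  static constexpr int regs[] = {4, 3, 5, 2};  // hand-picked per platform
  data->Initialize(sizeof(regs) / sizeof(regs[0]), regs);
}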
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 3cf819f1028..14ed9682275 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
diff --git a/chromium/v8/src/codegen/register.h b/chromium/v8/src/codegen/register.h
index bf499668bb1..2dcf0fbe8fd 100644
--- a/chromium/v8/src/codegen/register.h
+++ b/chromium/v8/src/codegen/register.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_REGISTER_H_
#define V8_CODEGEN_REGISTER_H_
+#include "src/base/bounds.h"
#include "src/codegen/reglist.h"
namespace v8 {
@@ -32,10 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_LE(0, code);
- DCHECK_GT(kNumRegisters, code);
-#endif
+ CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
@@ -47,9 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(is_valid());
-#endif
+ CONSTEXPR_DCHECK(is_valid());
return reg_code_;
}
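CONSTEXPR_DCHECK(base::IsInRange(...)) replaces the #if V8_HAS_CXX14_CONSTEXPR blocks: with C++14 relaxed constexpr, an assert statement is legal inside a constexpr function as long as it never fires, so the check runs in debug builds and vanishes otherwise. A standalone sketch of the idea, assuming C++14:

#include <cassert>

constexpr bool IsInRange(int value, int lo, int hi) {
  return lo <= value && value <= hi;
}

template <int kNumRegisters>
struct RegisterLike {
  int code_;
  static constexpr RegisterLike from_code(int code) {
    assert(IsInRange(code, 0, kNumRegisters - 1));  // CONSTEXPR_DCHECK stand-in
    return RegisterLike{code};
  }
};

// Validated during constant evaluation; a bad code fails to compile in
// debug configurations.
constexpr RegisterLike<32> r3 = RegisterLike<32>::from_code(3);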
diff --git a/chromium/v8/src/codegen/reloc-info.h b/chromium/v8/src/codegen/reloc-info.h
index 50ce001103e..a4ea9b1ee90 100644
--- a/chromium/v8/src/codegen/reloc-info.h
+++ b/chromium/v8/src/codegen/reloc-info.h
@@ -54,6 +54,8 @@ class RelocInfo {
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
+ NONE, // Never recorded value. Most common one, hence value 0.
+
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
COMPRESSED_EMBEDDED_OBJECT,
@@ -89,7 +91,6 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
- NONE, // never recorded value
LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
@@ -123,10 +124,8 @@ class RelocInfo {
return mode <= LAST_GCED_ENUM;
}
static constexpr bool IsShareableRelocMode(Mode mode) {
- static_assert(RelocInfo::NONE >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE,
- "Users of this function rely on NONE being a sharable "
- "relocation mode.");
- return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
+ return mode == RelocInfo::NONE ||
+ mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static constexpr bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
static constexpr bool IsCodeTargetMode(Mode mode) {
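Moving NONE to the front of the enum gives the most common relocation mode the value 0, and IsShareableRelocMode compensates by special-casing it instead of relying on NONE sorting past FIRST_SHAREABLE_RELOC_MODE. A trimmed sketch of the reordered enum and the adjusted predicate; the mode set and the shareable cutoff are illustrative:

// Trimmed sketch of the reordered mode enum and the adjusted predicate.
enum Mode {
  NONE,  // most common value, hence 0
  CODE_TARGET,
  FULL_EMBEDDED_OBJECT,
  EXTERNAL_REFERENCE,  // shareable cutoff in this sketch
  FIRST_SHAREABLE_RELOC_MODE = EXTERNAL_REFERENCE,
};

constexpr bool IsShareableRelocMode(Mode mode) {
  // NONE is shareable but no longer compares >= the shareable cutoff,
  // so it gets an explicit check.
  return mode == NONE || mode >= FIRST_SHAREABLE_RELOC_MODE;
}

static_assert(IsShareableRelocMode(NONE), "NONE stays shareable");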
diff --git a/chromium/v8/src/codegen/s390/constants-s390.h b/chromium/v8/src/codegen/s390/constants-s390.h
index 6cd5e4d9faf..5c524350518 100644
--- a/chromium/v8/src/codegen/s390/constants-s390.h
+++ b/chromium/v8/src/codegen/s390/constants-s390.h
@@ -567,11 +567,12 @@ using SixByteInstr = uint64_t;
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vscbi, VSCBI, \
0xE7F5) /* type = VRR_C VECTOR SUBTRACT COMPUTE BORROW INDICATION */ \
- V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
- V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
- V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
- V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
- V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */
+ V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
+ V(vbperm, VBPERM, 0xE785) /* type = VRR_C VECTOR BIT PERMUTE */
#define S390_VRI_A_OPCODE_LIST(V) \
V(vleib, VLEIB, 0xE740) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (8) */ \
diff --git a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc b/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
index 8e0e9a4cf54..b23ecb7289b 100644
--- a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ b/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -191,11 +191,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
@@ -295,6 +290,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
DefaultInitializePlatformSpecific(data, 4);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // TODO(v8:8888): Implement on this platform.
+ DefaultInitializePlatformSpecific(data, 4);
+}
+
void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:8888): Implement on this platform.
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index 7e7d1434c44..193f05929c6 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag(
// Reverse the byte_offset if emulating on little endian platform
byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
- tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
+ tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
} else {
- LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
AndP(r0, scratch, Operand(mask));
}
// Should be okay to remove rc
diff --git a/chromium/v8/src/codegen/safepoint-table.cc b/chromium/v8/src/codegen/safepoint-table.cc
index 962b1ea17f8..e50a2fada01 100644
--- a/chromium/v8/src/codegen/safepoint-table.cc
+++ b/chromium/v8/src/codegen/safepoint-table.cc
@@ -90,7 +90,7 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset()));
+ DeoptimizationInfo(zone_, assembler->pc_offset_for_safepoint()));
DeoptimizationInfo& new_info = deoptimization_info_.back();
return Safepoint(new_info.indexes);
}
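DefineSafepoint now keys entries to pc_offset_for_safepoint() rather than the raw buffer offset, which pairs with the set_last_call_pc_(pc_) bookkeeping added to the mips64 Call above: the safepoint should describe the call site even if extra material (a delay-slot nop, a trampoline pool) has been emitted since. A sketch under that assumption; the accessor body is guessed, not shown in this diff:

// Sketch of the last-call-pc bookkeeping assumed by
// pc_offset_for_safepoint(). Offsets are bytes into the code buffer.
class AssemblerSketch {
 public:
  void EmitCall() {
    pc_ += 8;             // call + delay-slot nop
    last_call_pc_ = pc_;  // set_last_call_pc_(pc_) in the patch
  }
  void EmitTrampolinePool() { pc_ += 16; }  // may land before the
                                            // safepoint is defined
  int pc_offset_for_safepoint() const { return last_call_pc_; }

 private:
  int pc_ = 0;
  int last_call_pc_ = 0;
};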
diff --git a/chromium/v8/src/codegen/source-position-table.cc b/chromium/v8/src/codegen/source-position-table.cc
index 0f03867331e..a07f76e4d10 100644
--- a/chromium/v8/src/codegen/source-position-table.cc
+++ b/chromium/v8/src/codegen/source-position-table.cc
@@ -49,10 +49,10 @@ void SubtractFromEntry(PositionTableEntry* value,
// Helper: Encode an integer.
template <typename T>
-void EncodeInt(std::vector<byte>* bytes, T value) {
+void EncodeInt(ZoneVector<byte>* bytes, T value) {
using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
- static const int kShift = sizeof(T) * kBitsPerByte - 1;
+ static constexpr int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((static_cast<unsigned_type>(value) << 1) ^ (value >> kShift));
DCHECK_GE(value, 0);
unsigned_type encoded = static_cast<unsigned_type>(value);
@@ -67,7 +67,7 @@ void EncodeInt(std::vector<byte>* bytes, T value) {
}
// Encode a PositionTableEntry.
-void EncodeEntry(std::vector<byte>* bytes, const PositionTableEntry& entry) {
+void EncodeEntry(ZoneVector<byte>* bytes, const PositionTableEntry& entry) {
// We only accept ascending code offsets.
DCHECK_GE(entry.code_offset, 0);
// Since code_offset is not negative, we use sign to encode is_statement.
@@ -115,7 +115,7 @@ Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
}
#ifdef ENABLE_SLOW_DCHECKS
-void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
+void CheckTableEquals(const ZoneVector<PositionTableEntry>& raw_entries,
SourcePositionTableIterator* encoded) {
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
@@ -133,8 +133,14 @@ void CheckTableEquals(const std::vector<PositionTableEntry>& raw_entries,
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
- SourcePositionTableBuilder::RecordingMode mode)
- : mode_(mode), previous_() {}
+ Zone* zone, SourcePositionTableBuilder::RecordingMode mode)
+ : mode_(mode),
+ bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_(zone),
+#endif
+ previous_() {
+}
void SourcePositionTableBuilder::AddPosition(size_t code_offset,
SourcePosition source_position,
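Switching bytes_ (and raw_entries_ under ENABLE_SLOW_DCHECKS) from std::vector to ZoneVector means the containers need an allocator at construction, so the builder now threads the Zone through its constructor and member initializer list. A sketch of the same pattern using a std::pmr arena (C++17) as a stand-in for V8's Zone:

#include <cstdint>
#include <memory_resource>
#include <vector>

// Sketch of the Zone-injection pattern above: the owning builder takes
// the arena in its constructor and hands it to each container member.
class TableBuilder {
 public:
  explicit TableBuilder(std::pmr::memory_resource* zone) : bytes_(zone) {}
  void Add(uint8_t b) { bytes_.push_back(b); }

 private:
  std::pmr::vector<uint8_t> bytes_;  // was std::vector, now arena-backed
};

int main() {
  std::pmr::monotonic_buffer_resource zone;
  TableBuilder builder(&zone);
  builder.Add(0x2A);
}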
diff --git a/chromium/v8/src/codegen/source-position-table.h b/chromium/v8/src/codegen/source-position-table.h
index 024eca54fa5..a42c6a44a3c 100644
--- a/chromium/v8/src/codegen/source-position-table.h
+++ b/chromium/v8/src/codegen/source-position-table.h
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
};
explicit SourcePositionTableBuilder(
- RecordingMode mode = RECORD_SOURCE_POSITIONS);
+ Zone* zone, RecordingMode mode = RECORD_SOURCE_POSITIONS);
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddEntry(const PositionTableEntry& entry);
RecordingMode mode_;
- std::vector<byte> bytes_;
+ ZoneVector<byte> bytes_;
#ifdef ENABLE_SLOW_DCHECKS
- std::vector<PositionTableEntry> raw_entries_;
+ ZoneVector<PositionTableEntry> raw_entries_;
#endif
PositionTableEntry previous_; // Previously written entry, to compute delta.
};
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.cc b/chromium/v8/src/codegen/x64/assembler-x64.cc
index 287de802be4..c1e2ec9808d 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/assembler-x64.cc
@@ -132,168 +132,53 @@ uint32_t RelocInfo::wasm_call_tag() const {
// -----------------------------------------------------------------------------
// Implementation of Operand
-namespace {
-class OperandBuilder {
- public:
- OperandBuilder(Register base, int32_t disp) {
- if (base == rsp || base == r12) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && base != rbp && base != r13) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register base, Register index, ScaleFactor scale,
- int32_t disp) {
- DCHECK(index != rsp);
- set_sib(scale, index, base);
- if (disp == 0 && base != rbp && base != r13) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
- }
-
- OperandBuilder(Register index, ScaleFactor scale, int32_t disp) {
- DCHECK(index != rsp);
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
- }
-
- OperandBuilder(Label* label, int addend) {
- data_.addend = addend;
- DCHECK_NOT_NULL(label);
- DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
- set_modrm(0, rbp);
- set_disp64(reinterpret_cast<intptr_t>(label));
+Operand::Operand(Operand operand, int32_t offset) {
+ DCHECK_GE(operand.data().len, 1);
+ // Operand encodes REX ModR/M [SIB] [Disp].
+ byte modrm = operand.data().buf[0];
+ DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
+ bool has_sib = ((modrm & 0x07) == 0x04);
+ byte mode = modrm & 0xC0;
+ int disp_offset = has_sib ? 2 : 1;
+ int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
+ // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+ // displacement.
+ bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
+ int32_t disp_value = 0;
+ if (mode == 0x80 || is_baseless) {
+ // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+ disp_value = ReadUnalignedValue<int32_t>(
+ reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
+ } else if (mode == 0x40) {
+ // Mode 1: Byte displacement.
+ disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
}
- OperandBuilder(Operand operand, int32_t offset) {
- DCHECK_GE(operand.data().len, 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.data().buf[0];
- DCHECK_LT(modrm, 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.data().buf[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless =
- (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = ReadUnalignedValue<int32_t>(
- reinterpret_cast<Address>(&operand.data().buf[disp_offset]));
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.data().buf[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- DCHECK(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- data_.rex = operand.data().rex;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
- data_.len = disp_offset + 4;
- WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
- disp_value);
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
- data_.len = disp_offset + 1;
- data_.buf[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- data_.buf[0] = (modrm & 0x3F); // Mode 0.
- data_.len = disp_offset;
- }
- if (has_sib) {
- data_.buf[1] = operand.data().buf[1];
- }
- }
-
- void set_modrm(int mod, Register rm_reg) {
- DCHECK(is_uint2(mod));
- data_.buf[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- data_.rex |= rm_reg.high_bit();
- }
-
- void set_sib(ScaleFactor scale, Register index, Register base) {
- DCHECK_EQ(data_.len, 1);
- DCHECK(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- DCHECK(index != rsp || base == rsp || base == r12);
- data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- data_.rex |= index.high_bit() << 1 | base.high_bit();
- data_.len = 2;
- }
-
- void set_disp8(int disp) {
- DCHECK(is_int8(disp));
- DCHECK(data_.len == 1 || data_.len == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
- *p = disp;
- data_.len += sizeof(int8_t);
- }
-
- void set_disp32(int disp) {
- DCHECK(data_.len == 1 || data_.len == 2);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(int32_t);
+ // Write new operand with same registers, but with modified displacement.
+ DCHECK(offset >= 0 ? disp_value + offset > disp_value
+ : disp_value + offset < disp_value); // No overflow.
+ disp_value += offset;
+ data_.rex = operand.data().rex;
+ if (!is_int8(disp_value) || is_baseless) {
+ // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+ data_.buf[0] = (modrm & 0x3F) | (is_baseless ? 0x00 : 0x80);
+ data_.len = disp_offset + 4;
+ WriteUnalignedValue(reinterpret_cast<Address>(&data_.buf[disp_offset]),
+ disp_value);
+ } else if (disp_value != 0 || (base_reg == 0x05)) {
+ // Need 8 bits of displacement.
+ data_.buf[0] = (modrm & 0x3F) | 0x40; // Mode 1.
+ data_.len = disp_offset + 1;
+ data_.buf[disp_offset] = static_cast<byte>(disp_value);
+ } else {
+ // Need no displacement.
+ data_.buf[0] = (modrm & 0x3F); // Mode 0.
+ data_.len = disp_offset;
}
-
- void set_disp64(int64_t disp) {
- DCHECK_EQ(1, data_.len);
- Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
- WriteUnalignedValue(p, disp);
- data_.len += sizeof(disp);
+ if (has_sib) {
+ data_.buf[1] = operand.data().buf[1];
}
-
- const Operand::Data& data() const { return data_; }
-
- private:
- Operand::Data data_;
-};
-} // namespace
-
-Operand::Operand(Register base, int32_t disp)
- : data_(OperandBuilder(base, disp).data()) {}
-
-Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(base, index, scale, disp).data()) {}
-
-Operand::Operand(Register index, ScaleFactor scale, int32_t disp)
- : data_(OperandBuilder(index, scale, disp).data()) {}
-
-Operand::Operand(Label* label, int addend)
- : data_(OperandBuilder(label, addend).data()) {}
-
-Operand::Operand(Operand operand, int32_t offset)
- : data_(OperandBuilder(operand, offset).data()) {}
+}
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
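The inlined Operand(Operand, offset) constructor above decodes the existing ModR/M mode, adds the offset, then re-encodes with the smallest displacement that still round-trips: mode 0 needs no displacement bytes, mode 1 one byte, mode 2 four, and base register 0x05 (rbp/r13) can never use mode 0 because that bit pattern means no base or RIP-relative. A sketch of just the mode-selection decision:

#include <cstdint>

// Sketch of the displacement re-encoding choice in Operand(Operand, offset).
int PickMode(int32_t disp, int base_reg, bool is_baseless) {
  if (is_baseless) return 0;  // keep mode 0 with a full 32-bit disp
  if (disp >= -128 && disp <= 127) {
    if (disp != 0 || base_reg == 0x05) return 1;  // 8-bit displacement
    return 0;                                     // no displacement byte
  }
  return 2;  // 32-bit displacement
}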
@@ -3424,6 +3309,20 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x08);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+void Assembler::roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x09);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3443,8 +3342,8 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
void Assembler::pmovmskb(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
emit(0x66);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xD7);
emit_sse_operand(dst, src);
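The pmovmskb fix swaps the mandatory 0x66 prefix ahead of the optional REX byte: in x64 encoding, legacy prefixes must come first and REX, when present, must immediately precede the opcode. A sketch that emits the instruction bytes in the corrected order; register numbers are plain ints here:

#include <cstdint>
#include <vector>

// Sketch of the x64 prefix ordering the pmovmskb fix restores.
void EmitPmovmskb(std::vector<uint8_t>* code, int dst, int src) {
  code->push_back(0x66);  // mandatory legacy prefix first
  uint8_t rex = static_cast<uint8_t>(0x40 | ((dst >> 3) << 2) | (src >> 3));
  if (rex != 0x40) code->push_back(rex);  // optional REX, last before opcode
  code->push_back(0x0F);
  code->push_back(0xD7);  // pmovmskb opcode
  code->push_back(static_cast<uint8_t>(0xC0 | ((dst & 7) << 3) | (src & 7)));
}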
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.h b/chromium/v8/src/codegen/x64/assembler-x64.h
index 24eb9765782..bf876945265 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/assembler-x64.h
@@ -173,13 +173,48 @@ class V8_EXPORT_PRIVATE Operand {
};
// [base + disp/r]
- Operand(Register base, int32_t disp);
+ V8_INLINE Operand(Register base, int32_t disp) {
+ if (base == rsp || base == r12) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && base != rbp && base != r13) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+ }
// [base + index*scale + disp/r]
- Operand(Register base, Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register base, Register index, ScaleFactor scale,
+ int32_t disp) {
+ DCHECK(index != rsp);
+ set_sib(scale, index, base);
+ if (disp == 0 && base != rbp && base != r13) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+ }
// [index*scale + disp/r]
- Operand(Register index, ScaleFactor scale, int32_t disp);
+ V8_INLINE Operand(Register index, ScaleFactor scale, int32_t disp) {
+ DCHECK(index != rsp);
+ set_modrm(0, rsp);
+ set_sib(scale, index, rbp);
+ set_disp32(disp);
+ }
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
@@ -187,25 +222,64 @@ class V8_EXPORT_PRIVATE Operand {
Operand(Operand base, int32_t offset);
// [rip + disp/r]
- explicit Operand(Label* label, int addend = 0);
+ V8_INLINE explicit Operand(Label* label, int addend = 0) {
+ data_.addend = addend;
+ DCHECK_NOT_NULL(label);
+ DCHECK(addend == 0 || (is_int8(addend) && label->is_bound()));
+ set_modrm(0, rbp);
+ set_disp64(reinterpret_cast<intptr_t>(label));
+ }
Operand(const Operand&) V8_NOEXCEPT = default;
+ const Data& data() const { return data_; }
+
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return data_.rex != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return data_.len; }
+ private:
+ V8_INLINE void set_modrm(int mod, Register rm_reg) {
+ DCHECK(is_uint2(mod));
+ data_.buf[0] = mod << 6 | rm_reg.low_bits();
+ // Set REX.B to the high bit of rm.code().
+ data_.rex |= rm_reg.high_bit();
+ }
- const Data& data() const { return data_; }
+ V8_INLINE void set_sib(ScaleFactor scale, Register index, Register base) {
+ DCHECK_EQ(data_.len, 1);
+ DCHECK(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12. Otherwise we
+ // would skip the SIB byte entirely.
+ DCHECK(index != rsp || base == rsp || base == r12);
+ data_.buf[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
+ data_.rex |= index.high_bit() << 1 | base.high_bit();
+ data_.len = 2;
+ }
- private:
- const Data data_;
+ V8_INLINE void set_disp8(int disp) {
+ DCHECK(is_int8(disp));
+ DCHECK(data_.len == 1 || data_.len == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&data_.buf[data_.len]);
+ *p = disp;
+ data_.len += sizeof(int8_t);
+ }
+
+ V8_INLINE void set_disp32(int disp) {
+ DCHECK(data_.len == 1 || data_.len == 2);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(int32_t);
+ }
+
+ V8_INLINE void set_disp64(int64_t disp) {
+ DCHECK_EQ(1, data_.len);
+ Address p = reinterpret_cast<Address>(&data_.buf[data_.len]);
+ WriteUnalignedValue(p, disp);
+ data_.len += sizeof(disp);
+ }
+
+ Data data_;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
@@ -1141,6 +1215,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
void cmpps(XMMRegister dst, Operand src, int8_t cmp);
@@ -1358,6 +1434,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
+ void vroundpd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ vinstr(0x09, dst, xmm0, src, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
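The packed forms mirror the scalar roundss/roundsd helpers: SSE4.1 encodes them as 66 0F 3A 08 /r ib (roundps) and 66 0F 3A 09 /r ib (roundpd), and in both the SSE and VEX paths the immediate is ORed with 0x8 so the SIMD precision (inexact) exception stays masked. A minimal usage sketch, assuming V8's RoundingMode constants (kRoundToNearest=0x0, kRoundDown=0x1, kRoundUp=0x2, kRoundToZero=0x3) and an Assembler named masm:

  {
    CpuFeatureScope sse4(&masm, SSE4_1);
    masm.roundps(xmm0, xmm1, kRoundDown);     // ib = 0x1 | 0x8 = 0x9
  }
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope avx(&masm, AVX);
    masm.vroundpd(xmm2, xmm3, kRoundToZero);  // VEX.66.0F3A 09, ib = 0xb
  }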
diff --git a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc b/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
index 6b9754efca1..31b2b67a4fb 100644
--- a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ b/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -129,6 +129,16 @@ void CallWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ // rdx : the feedback slot
+ Register registers[] = {rdi, rax, rbx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -137,6 +147,16 @@ void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rbx : the arguments list
+ // rdx : the feedback slot
+ // rax : the feedback vector
+ Register registers[] = {rdi, rbx, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments (on the stack, not including receiver)
@@ -168,6 +188,16 @@ void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the feedback slot
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rdi : the target to call
@@ -177,6 +207,16 @@ void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ // rax : the feedback slot
+ Register registers[] = {rdi, rdx, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
@@ -193,11 +233,6 @@ void AbortDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, rax};
@@ -289,7 +324,7 @@ void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -307,7 +342,7 @@ void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdx, // kLeft
rax, // kRight
- rdi, // Slot
+ rdi, // kSlot
rbx}; // kMaybeFeedbackVector
data->InitializePlatformSpecific(arraysize(registers), registers);
}
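Each of the new *_WithFeedback blocks follows the same recipe: list one fixed register per descriptor parameter, in declared parameter order, and let the shared initializer record the count via arraysize. A hypothetical descriptor written the same way (the name and register choices are invented for illustration, not part of this change):

  void MyOp_WithFeedbackDescriptor::InitializePlatformSpecific(
      CallInterfaceDescriptorData* data) {
    // rdi : the target to call
    // rdx : the feedback slot
    // rbx : the feedback vector
    Register registers[] = {rdi, rdx, rbx};
    data->InitializePlatformSpecific(arraysize(registers), registers);
  }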
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
index 7d6fdc5eb3d..fabf70bb036 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -2756,10 +2756,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
Immediate(static_cast<uint8_t>(mask)));
} else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
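Only the class naming the constant changes here, tracking the move of the flags word from MemoryChunk into its BasicMemoryChunk base; the emitted instruction sequence is untouched. What that sequence computes, as a C-style sketch (the constants live in src/heap, and cc is zero or not_zero):

  uintptr_t chunk = object_address & ~kPageAlignmentMask;  // page start
  uint32_t flags = *reinterpret_cast<uint32_t*>(
      chunk + BasicMemoryChunk::kFlagsOffset);
  // Branch to condition_met when (flags & mask) is zero / non-zero per cc;
  // masks below 256 use the byte-wide testb, wider ones testl.
  bool condition_met = (flags & mask) != 0;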
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.h b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
index 8382bf5a287..8c4fd898064 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
@@ -174,8 +174,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Cmpneqpd, cmpneqpd)
AVX_OP(Cmpnltpd, cmpnltpd)
AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Roundss, roundss)
- AVX_OP(Roundsd, roundsd)
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Sqrtps, sqrtps)
@@ -204,6 +202,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP(Psrlw, psrlw)
AVX_OP(Psrld, psrld)
AVX_OP(Psrlq, psrlq)
+ AVX_OP(Pmaddwd, pmaddwd)
AVX_OP(Paddb, paddb)
AVX_OP(Paddw, paddw)
AVX_OP(Paddd, paddd)
@@ -283,6 +282,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pextrq, pextrq)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ AVX_OP_SSE4_1(Roundpd, roundpd)
+ AVX_OP_SSE4_1(Roundss, roundss)
+ AVX_OP_SSE4_1(Roundsd, roundsd)
AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
#undef AVX_OP
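The earlier hunk drops Roundss/Roundsd from the unconditional AVX_OP list; the second re-adds them, together with the new packed forms, under AVX_OP_SSE4_1, whose non-AVX fallback guards the legacy encoding behind SSE4.1. Roughly what such a wrapper does (a simplified sketch of the dispatch, not the literal AvxHelper macro expansion):

  void Roundps(TurboAssembler* tasm, XMMRegister dst, XMMRegister src,
               RoundingMode mode) {
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope avx(tasm, AVX);
      tasm->vroundps(dst, src, mode);      // VEX form
    } else {
      CpuFeatureScope sse4(tasm, SSE4_1);  // legacy form needs SSE4.1
      tasm->roundps(dst, src, mode);
    }
  }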
@@ -442,7 +445,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
- DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
+ DCHECK(rmode == RelocInfo::NONE || rmode > RelocInfo::LAST_GCED_ENUM);
movq(dst, Immediate64(ptr, rmode));
}
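The relaxed DCHECK explicitly admits RelocInfo::NONE, so plain non-relocatable addresses pass even though NONE evidently no longer sorts above LAST_GCED_ENUM. A usage sketch (isolate assumed in scope; kScratchRegister is x64's designated scratch):

  Address entry = ExternalReference::isolate_address(isolate).address();
  masm.Move(kScratchRegister, entry, RelocInfo::NONE);  // no reloc recorded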
diff --git a/chromium/v8/src/codegen/x64/sse-instr.h b/chromium/v8/src/codegen/x64/sse-instr.h
index 74ec16d6a23..52107ed6b93 100644
--- a/chromium/v8/src/codegen/x64/sse-instr.h
+++ b/chromium/v8/src/codegen/x64/sse-instr.h
@@ -57,6 +57,7 @@
V(packssdw, 66, 0F, 6B) \
V(punpcklqdq, 66, 0F, 6C) \
V(punpckhqdq, 66, 0F, 6D) \
+ V(pmaddwd, 66, 0F, F5) \
V(paddb, 66, 0F, FC) \
V(paddw, 66, 0F, FD) \
V(paddd, 66, 0F, FE) \