Diffstat (limited to 'deps/v8/src/codegen')
-rw-r--r--  deps/v8/src/codegen/arm/macro-assembler-arm.cc | 5
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h | 56
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 53
-rw-r--r--  deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 34
-rw-r--r--  deps/v8/src/codegen/arm64/reglist-arm64.h | 8
-rw-r--r--  deps/v8/src/codegen/bailout-reason.h | 1
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.cc | 7
-rw-r--r--  deps/v8/src/codegen/code-stub-assembler.h | 10
-rw-r--r--  deps/v8/src/codegen/compiler.cc | 595
-rw-r--r--  deps/v8/src/codegen/compiler.h | 106
-rw-r--r--  deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 5
-rw-r--r--  deps/v8/src/codegen/interface-descriptors.h | 14
-rw-r--r--  deps/v8/src/codegen/loong64/macro-assembler-loong64.cc | 5
-rw-r--r--  deps/v8/src/codegen/loong64/macro-assembler-loong64.h | 24
-rw-r--r--  deps/v8/src/codegen/mips/macro-assembler-mips.cc | 5
-rw-r--r--  deps/v8/src/codegen/mips/register-mips.h | 7
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 5
-rw-r--r--  deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 24
-rw-r--r--  deps/v8/src/codegen/mips64/register-mips64.h | 7
-rw-r--r--  deps/v8/src/codegen/optimized-compilation-info.cc | 3
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.cc | 121
-rw-r--r--  deps/v8/src/codegen/ppc/assembler-ppc.h | 44
-rw-r--r--  deps/v8/src/codegen/ppc/constants-ppc.h | 115
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 35
-rw-r--r--  deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 18
-rw-r--r--  deps/v8/src/codegen/register.h | 22
-rw-r--r--  deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r--  deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 2
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 5
-rw-r--r--  deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h | 35
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.cc | 47
-rw-r--r--  deps/v8/src/codegen/s390/macro-assembler-s390.h | 31
-rw-r--r--  deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc | 6
-rw-r--r--  deps/v8/src/codegen/tnode.h | 2
-rw-r--r--  deps/v8/src/codegen/turbo-assembler.cc | 2
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.cc | 36
-rw-r--r--  deps/v8/src/codegen/x64/assembler-x64.h | 91
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.cc | 39
-rw-r--r--  deps/v8/src/codegen/x64/macro-assembler-x64.h | 4
39 files changed, 1049 insertions, 582 deletions
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 0c7df90dbf..bae50a2a5c 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -2648,9 +2648,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
// The above code must not emit constants either.
DCHECK(!has_pending_constants());
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 48b8a5f06a..803afc367d 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -1041,13 +1041,13 @@ void TurboAssembler::InitializeRootRegister() {
#endif
}
-void MacroAssembler::SmiTag(Register dst, Register src) {
+void TurboAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
-void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
+void TurboAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void TurboAssembler::SmiUntag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
@@ -1241,58 +1241,6 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
#endif
}
-template <TurboAssembler::StoreLRMode lr_mode>
-void TurboAssembler::PushCPURegList(CPURegList registers) {
- DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr));
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) {
- Pacibsp();
- }
-#endif
-
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
- // Push up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& src0 = registers.PopHighestIndex();
- const CPURegister& src1 = registers.PopHighestIndex();
- const CPURegister& src2 = registers.PopHighestIndex();
- const CPURegister& src3 = registers.PopHighestIndex();
- int count = count_before - registers.Count();
- PushHelper(count, size, src0, src1, src2, src3);
- }
-}
-
-template <TurboAssembler::LoadLRMode lr_mode>
-void TurboAssembler::PopCPURegList(CPURegList registers) {
- int size = registers.RegisterSizeInBytes();
- DCHECK_EQ(0, (size * registers.Count()) % 16);
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- bool contains_lr = registers.IncludesAliasOf(lr);
- DCHECK_IMPLIES((lr_mode == kDontLoadLR), !contains_lr);
-#endif
-
- // Pop up to four registers at a time.
- while (!registers.IsEmpty()) {
- int count_before = registers.Count();
- const CPURegister& dst0 = registers.PopLowestIndex();
- const CPURegister& dst1 = registers.PopLowestIndex();
- const CPURegister& dst2 = registers.PopLowestIndex();
- const CPURegister& dst3 = registers.PopLowestIndex();
- int count = count_before - registers.Count();
- PopHelper(count, size, dst0, dst1, dst2, dst3);
- }
-
-#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
- if (lr_mode == kAuthLR && contains_lr) {
- Autibsp();
- }
-#endif
-}
-
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index 191eb4bd20..552425edd4 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -56,6 +56,46 @@ constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits;
} // namespace
+void TurboAssembler::PushCPURegList(CPURegList registers) {
+ // If LR was stored here, we would need to sign it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // Push up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& src0 = registers.PopHighestIndex();
+ const CPURegister& src1 = registers.PopHighestIndex();
+ const CPURegister& src2 = registers.PopHighestIndex();
+ const CPURegister& src3 = registers.PopHighestIndex();
+ int count = count_before - registers.Count();
+ PushHelper(count, size, src0, src1, src2, src3);
+ }
+}
+
+void TurboAssembler::PopCPURegList(CPURegList registers) {
+ int size = registers.RegisterSizeInBytes();
+ DCHECK_EQ(0, (size * registers.Count()) % 16);
+
+ // If LR was loaded here, we would need to authenticate it if
+ // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on.
+ DCHECK(!registers.IncludesAliasOf(lr));
+
+ // Pop up to four registers at a time.
+ while (!registers.IsEmpty()) {
+ int count_before = registers.Count();
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ const CPURegister& dst2 = registers.PopLowestIndex();
+ const CPURegister& dst3 = registers.PopLowestIndex();
+ int count = count_before - registers.Count();
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+ }
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
auto list = kCallerSaved;
@@ -79,7 +119,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
list.Remove(exclusion);
list.Align();
- PushCPURegList<kDontStoreLR>(list);
+ PushCPURegList(list);
int bytes = list.TotalSizeInBytes();
@@ -106,7 +146,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
list.Remove(exclusion);
list.Align();
- PopCPURegList<kDontLoadLR>(list);
+ PopCPURegList(list);
bytes += list.TotalSizeInBytes();
return bytes;
@@ -2219,9 +2259,8 @@ void TurboAssembler::CallForDeoptimization(
BlockPoolsScope scope(this);
bl(jump_deoptimization_entry_label);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
@@ -3575,7 +3614,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
// Preserve all caller-saved registers as well as NZCV.
// PushCPURegList asserts that the size of each list is a multiple of 16
// bytes.
- PushCPURegList<kDontStoreLR>(saved_registers);
+ PushCPURegList(saved_registers);
PushCPURegList(kCallerSavedV);
// We can use caller-saved registers as scratch values (except for argN).
@@ -3628,7 +3667,7 @@ void TurboAssembler::Printf(const char* format, CPURegister arg0,
}
PopCPURegList(kCallerSavedV);
- PopCPURegList<kDontLoadLR>(saved_registers);
+ PopCPURegList(saved_registers);
TmpList()->set_bits(old_tmp_list);
FPTmpList()->set_bits(old_fp_tmp_list);
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 022d84cb60..ab56bba202 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -557,6 +557,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
+ inline void SmiTag(Register dst, Register src);
+ inline void SmiTag(Register smi);
+
inline void SmiToInt32(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
@@ -862,15 +865,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// kSRegSizeInBits are supported.
//
// Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
- //
- // The methods take an optional LoadLRMode or StoreLRMode template argument.
- // When control flow integrity measures are enabled and the link register is
- // included in 'registers', passing kSignLR to PushCPURegList will sign the
- // link register before pushing the list, and passing kAuthLR to
- // PopCPURegList will authenticate it after popping the list.
- template <StoreLRMode lr_mode = kDontStoreLR>
void PushCPURegList(CPURegList registers);
- template <LoadLRMode lr_mode = kDontLoadLR>
void PopCPURegList(CPURegList registers);
// Calculate how much stack space (in bytes) are required to store caller
@@ -1752,31 +1747,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
tbx(vd, vn, vn2, vn3, vn4, vm);
}
- // For the 'lr_mode' template argument of the following methods, see
- // PushCPURegList/PopCPURegList.
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushSizeRegList(RegList registers, unsigned reg_size) {
- PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PushCPURegList(CPURegList(reg_size, registers));
}
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushSizeRegList(DoubleRegList registers, unsigned reg_size) {
- PushCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PushCPURegList(CPURegList(reg_size, registers));
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopSizeRegList(RegList registers, unsigned reg_size) {
- PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PopCPURegList(CPURegList(reg_size, registers));
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopSizeRegList(DoubleRegList registers, unsigned reg_size) {
- PopCPURegList<lr_mode>(CPURegList(reg_size, registers));
+ PopCPURegList(CPURegList(reg_size, registers));
}
- template <StoreLRMode lr_mode = kDontStoreLR>
inline void PushXRegList(RegList regs) {
- PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
+ PushSizeRegList(regs, kXRegSizeInBits);
}
- template <LoadLRMode lr_mode = kDontLoadLR>
inline void PopXRegList(RegList regs) {
- PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
+ PopSizeRegList(regs, kXRegSizeInBits);
}
inline void PushWRegList(RegList regs) {
PushSizeRegList(regs, kWRegSizeInBits);
@@ -1855,9 +1842,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---- SMI and Number Utilities ----
- inline void SmiTag(Register dst, Register src);
- inline void SmiTag(Register smi);
-
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/arm64/reglist-arm64.h b/deps/v8/src/codegen/arm64/reglist-arm64.h
index 9f29589098..fdc14391c8 100644
--- a/deps/v8/src/codegen/arm64/reglist-arm64.h
+++ b/deps/v8/src/codegen/arm64/reglist-arm64.h
@@ -26,12 +26,8 @@ class V8_EXPORT_PRIVATE CPURegList {
public:
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
- : list_(base::fold(
- [](uint64_t acc, CPURegister v) {
- if (!v.is_valid()) return acc;
- return acc | (uint64_t{1} << v.code());
- },
- 0, reg0, regs...)),
+ : list_(((uint64_t{1} << reg0.code()) | ... |
+ (regs.is_valid() ? uint64_t{1} << regs.code() : 0))),
size_(reg0.SizeInBits()),
type_(reg0.type()) {
DCHECK(AreSameSizeAndType(reg0, regs...));
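
[editor's note] The reglist-arm64.h hunk above replaces the base::fold accumulator with a C++17 binary fold expression over the parameter pack. A minimal standalone sketch of the same bitmask-building pattern follows; "Reg" is a hypothetical stand-in type, not V8's CPURegister.

    #include <cstdint>
    #include <cstdio>

    struct Reg {
      int code;
      bool valid;
      constexpr bool is_valid() const { return valid; }
    };

    // Binary left fold with an initial value: OR the bit of the first
    // (always-valid) register with the bits of every valid remaining one,
    // mirroring the new CPURegList constructor initializer.
    template <typename... Regs>
    constexpr uint64_t RegListBits(Reg reg0, Regs... regs) {
      return ((uint64_t{1} << reg0.code) | ... |
              (regs.is_valid() ? uint64_t{1} << regs.code : 0));
    }

    int main() {
      constexpr Reg x0{0, true}, x3{3, true}, none{0, false};
      static_assert(RegListBits(x0, x3, none) == 0b1001, "bits 0 and 3 set");
      std::printf("%llx\n",
                  static_cast<unsigned long long>(RegListBits(x0, x3, none)));
    }

With an empty pack the fold collapses to the initial term, so a single-register list still works, which is the behavior the old base::fold call also provided.
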
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index cf01b360d6..c99730d1c7 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -44,6 +44,7 @@ namespace internal {
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
+ V(kOperandIsCleared, "Operand is cleared") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAConstructor, "Operand is not a constructor") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 364bfa029d..e6ff700927 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -8296,6 +8296,13 @@ TNode<Uint32T> CodeStubAssembler::LoadDetailsByDescriptorEntry(
}
TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
+ TNode<DescriptorArray> container, TNode<IntPtrT> descriptor_entry) {
+ return LoadDescriptorArrayElement<Object>(
+ container, DescriptorEntryToIndex(descriptor_entry),
+ DescriptorArray::ToValueIndex(0) * kTaggedSize);
+}
+
+TNode<Object> CodeStubAssembler::LoadValueByDescriptorEntry(
TNode<DescriptorArray> container, int descriptor_entry) {
return LoadDescriptorArrayElement<Object>(
container, IntPtrConstant(0),
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 55485d004a..bccdc34b74 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -3229,10 +3229,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CallBuiltin(Builtin::kSetProperty, context, receiver, key, value);
}
- TNode<Object> SetPropertyInLiteral(TNode<Context> context,
- TNode<JSObject> receiver,
- TNode<Object> key, TNode<Object> value) {
- return CallBuiltin(Builtin::kSetPropertyInLiteral, context, receiver, key,
+ TNode<Object> CreateDataProperty(TNode<Context> context,
+ TNode<JSObject> receiver, TNode<Object> key,
+ TNode<Object> value) {
+ return CallBuiltin(Builtin::kCreateDataProperty, context, receiver, key,
value);
}
@@ -3996,6 +3996,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> LoadDetailsByDescriptorEntry(
TNode<DescriptorArray> descriptors, int descriptor);
TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
+ TNode<IntPtrT> descriptor);
+ TNode<Object> LoadValueByDescriptorEntry(TNode<DescriptorArray> descriptors,
int descriptor);
TNode<MaybeObject> LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index df237d44bd..a0d2e45ffc 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -48,7 +48,6 @@
#include "src/logging/counters-scopes.h"
#include "src/logging/log-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
-#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/map.h"
@@ -66,6 +65,7 @@
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/maglev/maglev.h"
#endif // V8_ENABLE_MAGLEV
@@ -74,6 +74,31 @@ namespace internal {
namespace {
+constexpr bool IsOSR(BytecodeOffset osr_offset) { return !osr_offset.IsNone(); }
+
+void SetTieringState(JSFunction function, BytecodeOffset osr_offset,
+ TieringState value) {
+ if (IsOSR(osr_offset)) {
+ function.set_osr_tiering_state(value);
+ } else {
+ function.set_tiering_state(value);
+ }
+}
+
+void ResetTieringState(JSFunction function, BytecodeOffset osr_offset) {
+ if (function.has_feedback_vector()) {
+ SetTieringState(function, osr_offset, TieringState::kNone);
+ }
+}
+
+void ResetProfilerTicks(JSFunction function, BytecodeOffset osr_offset) {
+ if (!IsOSR(osr_offset)) {
+ // Reset profiler ticks, the function is no longer considered hot.
+ // TODO(v8:7700): Update for Maglev tiering.
+ function.feedback_vector().set_profiler_ticks(0);
+ }
+}
+
class CompilerTracer : public AllStatic {
public:
static void TracePrepareJob(Isolate* isolate, OptimizedCompilationInfo* info,
@@ -95,6 +120,28 @@ class CompilerTracer : public AllStatic {
PrintTraceSuffix(scope);
}
+ static void TraceOptimizeOSR(Isolate* isolate, Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ ConcurrencyMode mode) {
+ if (!FLAG_trace_osr) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - started. function: %s, osr offset: %d, mode: %s]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
+ }
+
+ static void TraceOptimizeOSRUnavailable(Isolate* isolate,
+ Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ ConcurrencyMode mode) {
+ if (!FLAG_trace_osr) return;
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - unavailable (failed or in progress). function: %s, osr "
+ "offset: %d, mode: %s]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt(), ToString(mode));
+ }
+
static void TraceCompilationStats(Isolate* isolate,
OptimizedCompilationInfo* info,
double ms_creategraph, double ms_optimize,
@@ -142,7 +189,7 @@ class CompilerTracer : public AllStatic {
if (!FLAG_trace_opt) return;
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintTracePrefix(scope, "found optimized code for", function, code_kind);
- if (!osr_offset.IsNone()) {
+ if (IsOSR(osr_offset)) {
PrintF(scope.file(), " at OSR bytecode offset %d", osr_offset.ToInt());
}
PrintTraceSuffix(scope);
@@ -288,12 +335,21 @@ struct ScopedTimer {
namespace {
-ScriptOriginOptions OriginOptionsForEval(Object script) {
- if (!script.IsScript()) return ScriptOriginOptions();
-
- const auto outer_origin_options = Script::cast(script).origin_options();
- return ScriptOriginOptions(outer_origin_options.IsSharedCrossOrigin(),
- outer_origin_options.IsOpaque());
+ScriptOriginOptions OriginOptionsForEval(
+ Object script, ParsingWhileDebugging parsing_while_debugging) {
+ bool is_shared_cross_origin =
+ parsing_while_debugging == ParsingWhileDebugging::kYes;
+ bool is_opaque = false;
+ if (script.IsScript()) {
+ auto script_origin_options = Script::cast(script).origin_options();
+ if (script_origin_options.IsSharedCrossOrigin()) {
+ is_shared_cross_origin = true;
+ }
+ if (script_origin_options.IsOpaque()) {
+ is_opaque = true;
+ }
+ }
+ return ScriptOriginOptions(is_shared_cross_origin, is_opaque);
}
} // namespace
@@ -386,7 +442,6 @@ void RecordUnoptimizedFunctionCompilation(
CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
DisallowJavascriptExecution no_js(isolate);
- CompilerTracer::TracePrepareJob(isolate, compilation_info(), compiler_name_);
// Delegate to the underlying implementation.
DCHECK_EQ(state(), State::kReadyToPrepare);
@@ -414,22 +469,22 @@ CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
return UpdateState(FinalizeJobImpl(isolate), State::kSucceeded);
}
-CompilationJob::Status OptimizedCompilationJob::RetryOptimization(
+CompilationJob::Status TurbofanCompilationJob::RetryOptimization(
BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->RetryOptimization(reason);
return UpdateState(FAILED, State::kFailed);
}
-CompilationJob::Status OptimizedCompilationJob::AbortOptimization(
+CompilationJob::Status TurbofanCompilationJob::AbortOptimization(
BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->AbortOptimization(reason);
return UpdateState(FAILED, State::kFailed);
}
-void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
- Isolate* isolate) const {
+void TurbofanCompilationJob::RecordCompilationStats(ConcurrencyMode mode,
+ Isolate* isolate) const {
DCHECK(compilation_info()->IsOptimizing());
Handle<JSFunction> function = compilation_info()->closure();
double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
@@ -477,12 +532,12 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
base::TimeDelta time_foreground =
time_taken_to_prepare_ + time_taken_to_finalize_;
switch (mode) {
- case OptimizedCompilationJob::kConcurrent:
+ case ConcurrencyMode::kConcurrent:
time_background += time_taken_to_execute_;
counters->turbofan_optimize_concurrent_total_time()->AddSample(
static_cast<int>(ElapsedTime().InMicroseconds()));
break;
- case OptimizedCompilationJob::kSynchronous:
+ case ConcurrencyMode::kSynchronous:
counters->turbofan_optimize_non_concurrent_total_time()->AddSample(
static_cast<int>(ElapsedTime().InMicroseconds()));
time_foreground += time_taken_to_execute_;
@@ -498,7 +553,7 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
}
}
-void OptimizedCompilationJob::RecordFunctionCompilation(
+void TurbofanCompilationJob::RecordFunctionCompilation(
CodeEventListener::LogEventsAndTags tag, Isolate* isolate) const {
Handle<AbstractCode> abstract_code =
Handle<AbstractCode>::cast(compilation_info()->code());
@@ -835,75 +890,81 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
return true;
}
-V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> GetCodeFromOptimizedCodeCache(
- Handle<JSFunction> function, BytecodeOffset osr_offset,
- CodeKind code_kind) {
- Isolate* isolate = function->GetIsolate();
- RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DisallowGarbageCollection no_gc;
- CodeT code;
- if (osr_offset.IsNone() && function->has_feedback_vector()) {
- FeedbackVector feedback_vector = function->feedback_vector();
- feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "GetCodeFromOptimizedCodeCache");
- code = feedback_vector.optimized_code();
- } else if (!osr_offset.IsNone()) {
- code = function->context()
- .native_context()
- .GetOSROptimizedCodeCache()
- .GetOptimizedCode(shared, osr_offset, isolate);
- }
- DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
- if (!code.is_null() && code.kind() == code_kind) {
- // Caching of optimized code enabled and optimized code found.
+// A wrapper to access either the OSR optimized code cache (one per native
+// context), or the optimized code cache slot on the feedback vector.
+class OptimizedCodeCache : public AllStatic {
+ public:
+ static V8_WARN_UNUSED_RESULT MaybeHandle<CodeT> Get(
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
+ CodeKind code_kind) {
+ if (!CodeKindIsStoredInOptimizedCodeCache(code_kind)) return {};
+
+ DisallowGarbageCollection no_gc;
+ SharedFunctionInfo shared = function->shared();
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
+
+ CodeT code;
+ if (IsOSR(osr_offset)) {
+ // For OSR, check the OSR optimized code cache.
+ code = function->native_context().osr_code_cache().TryGet(
+ shared, osr_offset, isolate);
+ } else {
+ // Non-OSR code may be cached on the feedback vector.
+ if (function->has_feedback_vector()) {
+ FeedbackVector feedback_vector = function->feedback_vector();
+ feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
+ shared, "OptimizedCodeCache::Get");
+ code = feedback_vector.optimized_code();
+ }
+ }
+
+ DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind);
+ if (code.is_null() || code.kind() != code_kind) return {};
+
DCHECK(!code.marked_for_deoptimization());
- DCHECK(function->shared().is_compiled());
+ DCHECK(shared.is_compiled());
DCHECK(CodeKindIsStoredInOptimizedCodeCache(code.kind()));
- DCHECK_IMPLIES(!osr_offset.IsNone(), CodeKindCanOSR(code.kind()));
- return Handle<CodeT>(code, isolate);
- }
- return MaybeHandle<CodeT>();
-}
+ DCHECK_IMPLIES(IsOSR(osr_offset), CodeKindCanOSR(code.kind()));
-void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
- Handle<JSFunction> function = compilation_info->closure();
- if (compilation_info->osr_offset().IsNone()) {
- Handle<FeedbackVector> vector =
- handle(function->feedback_vector(), function->GetIsolate());
- vector->ClearOptimizationMarker();
+ CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
+ code_kind);
+ return handle(code, isolate);
}
-}
-void InsertCodeIntoOptimizedCodeCache(
- OptimizedCompilationInfo* compilation_info) {
- const CodeKind kind = compilation_info->code_kind();
- if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
+ static void Insert(OptimizedCompilationInfo* compilation_info) {
+ const CodeKind kind = compilation_info->code_kind();
+ if (!CodeKindIsStoredInOptimizedCodeCache(kind)) return;
- if (compilation_info->function_context_specializing()) {
- // Function context specialization folds-in the function context, so no
- // sharing can occur. Make sure the optimized code cache is cleared.
- ClearOptimizedCodeCache(compilation_info);
- return;
- }
+ // Cache optimized code.
+ Handle<JSFunction> function = compilation_info->closure();
+ Isolate* isolate = function->GetIsolate();
+ Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
+ const BytecodeOffset osr_offset = compilation_info->osr_offset();
- // Cache optimized code.
- Handle<JSFunction> function = compilation_info->closure();
- Isolate* isolate = function->GetIsolate();
- Handle<CodeT> code = ToCodeT(compilation_info->code(), isolate);
- Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- Handle<NativeContext> native_context(function->context().native_context(),
- isolate);
- if (compilation_info->osr_offset().IsNone()) {
- Handle<FeedbackVector> vector =
- handle(function->feedback_vector(), isolate);
- FeedbackVector::SetOptimizedCode(vector, code);
- } else {
- DCHECK(CodeKindCanOSR(kind));
- OSROptimizedCodeCache::AddOptimizedCode(native_context, shared, code,
- compilation_info->osr_offset());
+ if (IsOSR(osr_offset)) {
+ DCHECK(CodeKindCanOSR(kind));
+ DCHECK(!compilation_info->function_context_specializing());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<NativeContext> native_context(function->native_context(), isolate);
+ OSROptimizedCodeCache::Insert(isolate, native_context, shared, code,
+ osr_offset);
+ return;
+ }
+
+ DCHECK(!IsOSR(osr_offset));
+
+ if (compilation_info->function_context_specializing()) {
+ // Function context specialization folds-in the function context, so no
+ // sharing can occur. Make sure the optimized code cache is cleared.
+ if (function->feedback_vector().has_optimized_code()) {
+ function->feedback_vector().ClearOptimizedCode();
+ }
+ return;
+ }
+
+ function->feedback_vector().SetOptimizedCode(code);
}
-}
+};
// Runs PrepareJob in the proper compilation & canonical scopes. Handles will be
// allocated in a persistent handle scope that is detached and handed off to the
@@ -912,12 +973,17 @@ bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
CompilationHandleScope compilation(isolate, compilation_info);
CanonicalHandleScopeForTurbofan canonical(isolate, compilation_info);
+ CompilerTracer::TracePrepareJob(isolate, compilation_info,
+ job->compiler_name());
compilation_info->ReopenHandlesInNewHandleScope(isolate);
return job->PrepareJob(isolate) == CompilationJob::SUCCEEDED;
}
-bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
- OptimizedCompilationInfo* compilation_info) {
+bool CompileTurbofan_NotConcurrent(Isolate* isolate,
+ TurbofanCompilationJob* job) {
+ OptimizedCompilationInfo* const compilation_info = job->compilation_info();
+ DCHECK_EQ(compilation_info->code_kind(), CodeKind::TURBOFAN);
+
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -945,21 +1011,23 @@ bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
}
// Success!
- job->RecordCompilationStats(OptimizedCompilationJob::kSynchronous, isolate);
+ job->RecordCompilationStats(ConcurrencyMode::kSynchronous, isolate);
DCHECK(!isolate->has_pending_exception());
- InsertCodeIntoOptimizedCodeCache(compilation_info);
+ OptimizedCodeCache::Insert(compilation_info);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, isolate);
return true;
}
-bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
- Isolate* isolate,
- OptimizedCompilationInfo* compilation_info,
- CodeKind code_kind, Handle<JSFunction> function) {
+bool CompileTurbofan_Concurrent(Isolate* isolate,
+ std::unique_ptr<TurbofanCompilationJob> job) {
+ OptimizedCompilationInfo* const compilation_info = job->compilation_info();
+ DCHECK_EQ(compilation_info->code_kind(), CodeKind::TURBOFAN);
+ Handle<JSFunction> function = compilation_info->closure();
+
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" later.\n");
}
return false;
@@ -968,7 +1036,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
if (isolate->heap()->HighMemoryPressure()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** High memory pressure, will retry optimizing ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" later.\n");
}
return false;
@@ -984,41 +1052,22 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
}
// The background recompile will own this job.
- isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.get());
- job.release();
+ isolate->optimizing_compile_dispatcher()->QueueForOptimization(job.release());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
- compilation_info->closure()->ShortPrint();
+ function->ShortPrint();
PrintF(" for concurrent optimization.\n");
}
- if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
- function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
- }
+ SetTieringState(*function, compilation_info->osr_offset(),
+ TieringState::kInProgress);
- // Note: Usually the active tier is expected to be Ignition at this point (in
- // other words we don't expect to optimize if the function is already
- // TF-optimized). There is a special case for OSR though, for which we *can*
- // reach this point even if we've already generated non-OSR'd TF code.
- DCHECK(function->shared().HasBytecodeArray());
+ DCHECK(compilation_info->shared_info()->HasBytecodeArray());
return true;
}
-// Returns the code object at which execution continues after a concurrent
-// optimization job has been started (but not finished).
-Handle<CodeT> ContinuationForConcurrentOptimization(
- Isolate* isolate, Handle<JSFunction> function) {
- if (function->shared().HasBaselineCode()) {
- CodeT baseline_code = function->shared().baseline_code(kAcquireLoad);
- function->set_code(baseline_code);
- return handle(baseline_code, isolate);
- }
- DCHECK(function->ActiveTierIsIgnition());
- return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
-}
-
-enum class GetOptimizedCodeResultHandling {
+enum class CompileResultBehavior {
// Default behavior, i.e. install the result, insert into caches, etc.
kDefault,
// Used only for stress testing. The compilation result should be discarded.
@@ -1038,43 +1087,40 @@ bool ShouldOptimize(CodeKind code_kind, Handle<SharedFunctionInfo> shared) {
}
}
-MaybeHandle<CodeT> CompileTurbofan(
- Isolate* isolate, Handle<JSFunction> function,
- Handle<SharedFunctionInfo> shared, ConcurrencyMode mode,
- BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
- GetOptimizedCodeResultHandling result_handling) {
+MaybeHandle<CodeT> CompileTurbofan(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<SharedFunctionInfo> shared,
+ ConcurrencyMode mode,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame,
+ CompileResultBehavior result_behavior) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
- static constexpr CodeKind kCodeKind = CodeKind::TURBOFAN;
-
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
bool has_script = shared->script().IsScript();
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
- std::unique_ptr<OptimizedCompilationJob> job(
- compiler::Pipeline::NewCompilationJob(isolate, function, kCodeKind,
- has_script, osr_offset, osr_frame));
- OptimizedCompilationInfo* compilation_info = job->compilation_info();
+ std::unique_ptr<TurbofanCompilationJob> job(
+ compiler::Pipeline::NewCompilationJob(isolate, function,
+ CodeKind::TURBOFAN, has_script,
+ osr_offset, osr_frame));
- if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
- compilation_info->set_discard_result_for_testing();
+ if (result_behavior == CompileResultBehavior::kDiscardForTesting) {
+ job->compilation_info()->set_discard_result_for_testing();
}
// Prepare the job and launch concurrent compilation, or compile now.
- if (mode == ConcurrencyMode::kConcurrent) {
- if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
- kCodeKind, function)) {
- return ContinuationForConcurrentOptimization(isolate, function);
- }
+ if (IsConcurrent(mode)) {
+ if (CompileTurbofan_Concurrent(isolate, std::move(job))) return {};
} else {
- DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
- if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
- return ToCodeT(compilation_info->code(), isolate);
+ DCHECK(IsSynchronous(mode));
+ if (CompileTurbofan_NotConcurrent(isolate, job.get())) {
+ return ToCodeT(job->compilation_info()->code(), isolate);
}
}
@@ -1082,30 +1128,43 @@ MaybeHandle<CodeT> CompileTurbofan(
return {};
}
-MaybeHandle<CodeT> CompileMaglev(
- Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
- BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
- GetOptimizedCodeResultHandling result_handling) {
+#ifdef V8_ENABLE_MAGLEV
+// TODO(v8:7700): Record maglev compilations better.
+void RecordMaglevFunctionCompilation(Isolate* isolate,
+ Handle<JSFunction> function) {
+ Handle<AbstractCode> abstract_code(
+ AbstractCode::cast(FromCodeT(function->code())), isolate);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+ Handle<FeedbackVector> feedback_vector(function->feedback_vector(), isolate);
+
+ // Optimistic estimate.
+ double time_taken_ms = 0;
+
+ LogFunctionCompilation(isolate, CodeEventListener::FUNCTION_TAG, script,
+ shared, feedback_vector, abstract_code,
+ abstract_code->kind(), time_taken_ms);
+}
+#endif // V8_ENABLE_MAGLEV
+
+MaybeHandle<CodeT> CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame,
+ CompileResultBehavior result_behavior) {
#ifdef V8_ENABLE_MAGLEV
DCHECK(FLAG_maglev);
// TODO(v8:7700): Add missing support.
- CHECK(osr_offset.IsNone());
+ CHECK(!IsOSR(osr_offset));
CHECK(osr_frame == nullptr);
- CHECK(result_handling == GetOptimizedCodeResultHandling::kDefault);
+ CHECK(result_behavior == CompileResultBehavior::kDefault);
// TODO(v8:7700): Tracing, see CompileTurbofan.
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- if (mode == ConcurrencyMode::kNotConcurrent) {
- function->ClearOptimizationMarker();
- return Maglev::Compile(isolate, function);
- }
-
- DCHECK_EQ(mode, ConcurrencyMode::kConcurrent);
-
- // TODO(v8:7700): See everything in GetOptimizedCodeLater.
+ // TODO(v8:7700): See everything in CompileTurbofan_Concurrent.
// - Tracing,
// - timers,
// - aborts on memory pressure,
@@ -1116,32 +1175,55 @@ MaybeHandle<CodeT> CompileMaglev(
CompilationJob::Status status = job->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED); // TODO(v8:7700): Use status.
+ if (IsSynchronous(mode)) {
+ function->reset_tiering_state();
+ {
+ // Park the main thread Isolate here, to be in the same state as
+ // background threads.
+ ParkedScope parked_scope(isolate->main_thread_local_isolate());
+ if (job->ExecuteJob(isolate->counters()->runtime_call_stats(),
+ isolate->main_thread_local_isolate()) !=
+ CompilationJob::SUCCEEDED) {
+ return {};
+ }
+ }
+
+ if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) {
+ return {};
+ }
+
+ RecordMaglevFunctionCompilation(isolate, function);
+ return handle(function->code(), isolate);
+ }
+
+ DCHECK(IsConcurrent(mode));
+
// Enqueue it.
isolate->maglev_concurrent_dispatcher()->EnqueueJob(std::move(job));
// Remember that the function is currently being processed.
- function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
+ SetTieringState(*function, osr_offset, TieringState::kInProgress);
- // The code that triggered optimization continues execution here.
- return ContinuationForConcurrentOptimization(isolate, function);
+ return {};
#else // V8_ENABLE_MAGLEV
UNREACHABLE();
#endif // V8_ENABLE_MAGLEV
}
-MaybeHandle<CodeT> GetOptimizedCode(
+MaybeHandle<CodeT> GetOrCompileOptimized(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
JavaScriptFrame* osr_frame = nullptr,
- GetOptimizedCodeResultHandling result_handling =
- GetOptimizedCodeResultHandling::kDefault) {
+ CompileResultBehavior result_behavior = CompileResultBehavior::kDefault) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- // Make sure we clear the optimization marker on the function so that we
- // don't try to re-optimize.
- if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
+ // Clear the optimization marker on the function so that we don't try to
+ // re-optimize.
+ if (!IsOSR(osr_offset)) {
+ ResetTieringState(*function, osr_offset);
+ }
// TODO(v8:7700): Distinguish between Maglev and Turbofan.
if (shared->optimization_disabled() &&
@@ -1165,29 +1247,23 @@ MaybeHandle<CodeT> GetOptimizedCode(
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
- // Check the optimized code cache (stored on the SharedFunctionInfo).
- if (CodeKindIsStoredInOptimizedCodeCache(code_kind)) {
- Handle<CodeT> cached_code;
- if (GetCodeFromOptimizedCodeCache(function, osr_offset, code_kind)
- .ToHandle(&cached_code)) {
- CompilerTracer::TraceOptimizedCodeCacheHit(isolate, function, osr_offset,
- code_kind);
- return cached_code;
- }
+ Handle<CodeT> cached_code;
+ if (OptimizedCodeCache::Get(isolate, function, osr_offset, code_kind)
+ .ToHandle(&cached_code)) {
+ return cached_code;
}
- // Reset profiler ticks, the function is no longer considered hot.
- // TODO(v8:7700): Update for Maglev tiering.
DCHECK(shared->is_compiled());
- function->feedback_vector().set_profiler_ticks(0);
+
+ ResetProfilerTicks(*function, osr_offset);
if (code_kind == CodeKind::TURBOFAN) {
return CompileTurbofan(isolate, function, shared, mode, osr_offset,
- osr_frame, result_handling);
+ osr_frame, result_behavior);
} else {
DCHECK_EQ(code_kind, CodeKind::MAGLEV);
return CompileMaglev(isolate, function, mode, osr_offset, osr_frame,
- result_handling);
+ result_behavior);
}
}
@@ -1203,16 +1279,15 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
if (code_kind == CodeKind::MAGLEV) return;
DCHECK(FLAG_stress_concurrent_inlining &&
- isolate->concurrent_recompilation_enabled() &&
- mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->concurrent_recompilation_enabled() && IsSynchronous(mode) &&
isolate->node_observer() == nullptr);
- GetOptimizedCodeResultHandling result_handling =
+ CompileResultBehavior result_behavior =
FLAG_stress_concurrent_inlining_attach_code
- ? GetOptimizedCodeResultHandling::kDefault
- : GetOptimizedCodeResultHandling::kDiscardForTesting;
- USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
- code_kind, BytecodeOffset::None(), nullptr,
- result_handling));
+ ? CompileResultBehavior::kDefault
+ : CompileResultBehavior::kDiscardForTesting;
+ USE(GetOrCompileOptimized(isolate, function, ConcurrencyMode::kConcurrent,
+ code_kind, BytecodeOffset::None(), nullptr,
+ result_behavior));
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1988,7 +2063,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
// We should never reach here if the function is already compiled or
// optimized.
DCHECK(!function->is_compiled());
- DCHECK(!function->HasOptimizationMarker());
+ DCHECK(IsNone(function->tiering_state()));
DCHECK(!function->HasAvailableOptimizedCode());
// Reset the JSFunction if we are recompiling due to the bytecode having been
@@ -2025,18 +2100,17 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
CodeKindForTopTier());
const CodeKind code_kind = CodeKindForTopTier();
- const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kSynchronous;
if (FLAG_stress_concurrent_inlining &&
isolate->concurrent_recompilation_enabled() &&
- concurrency_mode == ConcurrencyMode::kNotConcurrent &&
isolate->node_observer() == nullptr) {
SpawnDuplicateConcurrentJobForStressTesting(isolate, function,
concurrency_mode, code_kind);
}
Handle<CodeT> maybe_code;
- if (GetOptimizedCode(isolate, function, concurrency_mode, code_kind)
+ if (GetOrCompileOptimized(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
}
@@ -2092,9 +2166,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
shared->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (V8_LIKELY(FLAG_use_osr)) {
- // Arm back edges for OSR
- shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
- AbstractCode::kMaxLoopNestingMarker);
+ shared->GetBytecodeArray(isolate).RequestOsrAtNextOpportunity();
}
}
double time_taken_ms = time_taken.InMillisecondsF();
@@ -2138,7 +2210,7 @@ bool Compiler::CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
// Bytecode must be available for maglev compilation.
DCHECK(is_compiled_scope->is_compiled());
// TODO(v8:7700): Support concurrent compilation.
- DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
+ DCHECK(IsSynchronous(mode));
// Maglev code needs a feedback vector.
JSFunction::EnsureFeedbackVector(isolate, function, is_compiled_scope);
@@ -2188,36 +2260,27 @@ void Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
DCHECK(AllowCompilation::IsAllowed(isolate));
if (FLAG_stress_concurrent_inlining &&
- isolate->concurrent_recompilation_enabled() &&
- mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->concurrent_recompilation_enabled() && IsSynchronous(mode) &&
isolate->node_observer() == nullptr) {
SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
code_kind);
}
Handle<CodeT> code;
- if (!GetOptimizedCode(isolate, function, mode, code_kind).ToHandle(&code)) {
- // Optimization failed, get the existing code. We could have optimized code
- // from a lower tier here. Unoptimized code must exist already if we are
- // optimizing.
- DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared().is_compiled());
- DCHECK(function->shared().HasBytecodeArray());
- code = ContinuationForConcurrentOptimization(isolate, function);
+ if (GetOrCompileOptimized(isolate, function, mode, code_kind)
+ .ToHandle(&code)) {
+ function->set_code(*code, kReleaseStore);
}
- function->set_code(*code, kReleaseStore);
-
- // Check postconditions on success.
+#ifdef DEBUG
DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared().is_compiled());
DCHECK(function->is_compiled());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->IsInOptimizationQueue());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->ChecksOptimizationMarker());
- DCHECK_IMPLIES(function->IsInOptimizationQueue(),
- mode == ConcurrencyMode::kConcurrent);
+ DCHECK(function->shared().HasBytecodeArray());
+ const TieringState tiering_state = function->tiering_state();
+ DCHECK(IsNone(tiering_state) || IsInProgress(tiering_state));
+ DCHECK_IMPLIES(IsInProgress(tiering_state), function->ChecksTieringState());
+ DCHECK_IMPLIES(IsInProgress(tiering_state), IsConcurrent(mode));
+#endif // DEBUG
}
// static
@@ -2290,9 +2353,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!context->IsNativeContext()) {
maybe_outer_scope_info = handle(context->scope_info(), isolate);
}
- script =
- parse_info.CreateScript(isolate, source, kNullMaybeHandle,
- OriginOptionsForEval(outer_info->script()));
+ script = parse_info.CreateScript(
+ isolate, source, kNullMaybeHandle,
+ OriginOptionsForEval(outer_info->script(), parsing_while_debugging));
script->set_eval_from_shared(*outer_info);
if (eval_position == kNoSourcePosition) {
// If the position is missing, attempt to get the code offset by
@@ -2303,7 +2366,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
- script->set_origin_options(OriginOptionsForEval(*summary.script()));
+ script->set_origin_options(
+ OriginOptionsForEval(*summary.script(), parsing_while_debugging));
eval_position = -summary.code_offset();
} else {
eval_position = 0;
@@ -3309,21 +3373,67 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<CodeT> Compiler::GetOptimizedCodeForOSR(
- Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame) {
- DCHECK(!osr_offset.IsNone());
- DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(isolate, function, ConcurrencyMode::kNotConcurrent,
- CodeKindForOSR(), osr_offset, osr_frame);
+MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
+ Handle<JSFunction> function,
+ BytecodeOffset osr_offset,
+ UnoptimizedFrame* frame,
+ ConcurrencyMode mode) {
+ DCHECK(IsOSR(osr_offset));
+ DCHECK_NOT_NULL(frame);
+
+ if (V8_UNLIKELY(isolate->serializer_enabled())) return {};
+ if (V8_UNLIKELY(function->shared().optimization_disabled())) return {};
+
+ // TODO(chromium:1031479): Currently, OSR triggering mechanism is tied to the
+ // bytecode array. So, it might be possible to mark closure in one native
+ // context and optimize a closure from a different native context. So check if
+ // there is a feedback vector before OSRing. We don't expect this to happen
+ // often.
+ if (V8_UNLIKELY(!function->has_feedback_vector())) return {};
+
+ // One OSR job per function at a time.
+ if (IsInProgress(function->osr_tiering_state())) {
+ return {};
+ }
+
+ // -- Alright, decided to proceed. --
+
+ // Disarm all back edges, i.e. reset the OSR urgency and install target.
+ //
+ // Note that the bytecode array active on the stack might be different from
+ // the one installed on the function (e.g. patched by debugger). This however
+ // is fine because we guarantee the layout to be in sync, hence any
+ // BytecodeOffset representing the entry point will be valid for any copy of
+ // the bytecode.
+ Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
+ bytecode->reset_osr_urgency_and_install_target();
+
+ CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
+ MaybeHandle<CodeT> result = GetOrCompileOptimized(
+ isolate, function, mode, CodeKind::TURBOFAN, osr_offset, frame);
+
+ if (result.is_null()) {
+ CompilerTracer::TraceOptimizeOSRUnavailable(isolate, function, osr_offset,
+ mode);
+ }
+
+ return result;
}
// static
-bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
- Isolate* isolate) {
+void Compiler::DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ bool restore_function_code) {
+ Handle<JSFunction> function = job->compilation_info()->closure();
+ ResetTieringState(*function, job->compilation_info()->osr_offset());
+ if (restore_function_code) {
+ function->set_code(function->shared().GetCode(), kReleaseStore);
+ }
+}
+
+// static
+bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ Isolate* isolate) {
VMState<COMPILER> state(isolate);
- // Take ownership of the job. Deleting the job also tears down the zone.
- std::unique_ptr<OptimizedCompilationJob> job_scope(job);
OptimizedCompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
@@ -3331,12 +3441,14 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentFinalize");
+ Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
const bool use_result = !compilation_info->discard_result_for_testing();
+ const BytecodeOffset osr_offset = compilation_info->osr_offset();
+
if (V8_LIKELY(use_result)) {
- // Reset profiler ticks, function is no longer considered hot.
- compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
+ ResetProfilerTicks(*function, osr_offset);
}
DCHECK(!shared->HasBreakInfo());
@@ -3350,15 +3462,23 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
if (shared->optimization_disabled()) {
job->RetryOptimization(BailoutReason::kOptimizationDisabled);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
- job->RecordCompilationStats(OptimizedCompilationJob::kConcurrent,
- isolate);
+ job->RecordCompilationStats(ConcurrencyMode::kConcurrent, isolate);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
if (V8_LIKELY(use_result)) {
- InsertCodeIntoOptimizedCodeCache(compilation_info);
+ ResetTieringState(*function, osr_offset);
+ OptimizedCodeCache::Insert(compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
- compilation_info->closure()->set_code(*compilation_info->code(),
- kReleaseStore);
+ if (IsOSR(osr_offset)) {
+ if (FLAG_trace_osr) {
+ PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
+ "[OSR - requesting install. function: %s, osr offset: %d]\n",
+ function->DebugNameCStr().get(), osr_offset.ToInt());
+ }
+ shared->GetBytecodeArray(isolate).set_osr_install_target(osr_offset);
+ } else {
+ function->set_code(*compilation_info->code(), kReleaseStore);
+ }
}
return CompilationJob::SUCCEEDED;
}
@@ -3367,16 +3487,25 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
if (V8_LIKELY(use_result)) {
- compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
- // Clear the InOptimizationQueue marker, if it exists.
- if (compilation_info->closure()->IsInOptimizationQueue()) {
- compilation_info->closure()->ClearOptimizationMarker();
+ ResetTieringState(*function, osr_offset);
+ if (!IsOSR(osr_offset)) {
+ function->set_code(shared->GetCode(), kReleaseStore);
}
}
return CompilationJob::FAILED;
}
// static
+bool Compiler::FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
+ Isolate* isolate) {
+#ifdef V8_ENABLE_MAGLEV
+ VMState<COMPILER> state(isolate);
+ RecordMaglevFunctionCompilation(isolate, job->function());
+#endif
+ return CompilationJob::SUCCEEDED;
+}
+
+// static
void Compiler::PostInstantiation(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
@@ -3416,7 +3545,7 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
CompilerTracer::TraceMarkForAlwaysOpt(isolate, function);
JSFunction::EnsureFeedbackVector(isolate, function, &is_compiled_scope);
function->MarkForOptimization(isolate, CodeKind::TURBOFAN,
- ConcurrencyMode::kNotConcurrent);
+ ConcurrencyMode::kSynchronous);
}
}
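
[editor's note] The compiler.cc hunks above repeatedly dispatch on IsOSR(osr_offset): OSR requests track their tiering state in a separate per-function slot from regular tiering requests, and both SetTieringState and ResetTieringState route accordingly. A simplified, self-contained model of that dispatch follows; Function, OsrOffset, and the field names are hypothetical stand-ins, not V8's JSFunction/BytecodeOffset API.

    #include <cassert>
    #include <optional>

    enum class TieringState { kNone, kRequested, kInProgress };

    // Stand-in for JSFunction: one slot for regular tiering, one for
    // on-stack replacement (OSR), mirroring set_tiering_state() vs
    // set_osr_tiering_state() in the patch.
    struct Function {
      TieringState tiering_state = TieringState::kNone;
      TieringState osr_tiering_state = TieringState::kNone;
      bool has_feedback_vector = true;
    };

    // An unset offset means "not OSR", like BytecodeOffset::None().
    using OsrOffset = std::optional<int>;

    bool IsOSR(OsrOffset osr_offset) { return osr_offset.has_value(); }

    void SetTieringState(Function& f, OsrOffset osr_offset, TieringState value) {
      if (IsOSR(osr_offset)) {
        f.osr_tiering_state = value;
      } else {
        f.tiering_state = value;
      }
    }

    void ResetTieringState(Function& f, OsrOffset osr_offset) {
      // As in the patch, only touch the flag if feedback storage exists.
      if (f.has_feedback_vector) {
        SetTieringState(f, osr_offset, TieringState::kNone);
      }
    }

    int main() {
      Function f;
      SetTieringState(f, /*osr_offset=*/42, TieringState::kInProgress);
      assert(f.osr_tiering_state == TieringState::kInProgress);
      assert(f.tiering_state == TieringState::kNone);  // regular slot untouched
      ResetTieringState(f, /*osr_offset=*/42);
      assert(f.osr_tiering_state == TieringState::kNone);
      return 0;
    }

Keeping the two flags separate is what lets a concurrent OSR job run without clobbering an in-flight regular tiering request on the same function, which is the invariant the FinalizeTurbofanCompilationJob changes above rely on.
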
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index f34c0a3326..4f8270f1e4 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -30,24 +30,23 @@ namespace internal {
// Forward declarations.
class AlignedCachedData;
-class AstRawString;
class BackgroundCompileTask;
class IsCompiledScope;
-class JavaScriptFrame;
class OptimizedCompilationInfo;
-class OptimizedCompilationJob;
class ParseInfo;
-class Parser;
class RuntimeCallStats;
class TimedHistogram;
+class TurbofanCompilationJob;
class UnoptimizedCompilationInfo;
class UnoptimizedCompilationJob;
+class UnoptimizedFrame;
class WorkerThreadRuntimeCallStats;
struct ScriptDetails;
struct ScriptStreamingData;
-using UnoptimizedCompilationJobList =
- std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
+namespace maglev {
+class MaglevCompilationJob;
+} // namespace maglev
// The V8 compiler API.
//
@@ -96,6 +95,13 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
+ // Generate and return optimized code for OSR. The empty handle is returned
+ // either on failure, or after spawning a concurrent OSR task (in which case
+ // a future OSR request will pick up the resulting code object).
+ V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> CompileOptimizedOSR(
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
+ UnoptimizedFrame* frame, ConcurrencyMode mode);
+
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
@@ -111,9 +117,17 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Isolate* isolate,
ClearExceptionFlag flag);
- // Finalize and install optimized code from previously run job.
- static bool FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
- Isolate* isolate);
+ // Dispose a job without finalization.
+ static void DisposeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ bool restore_function_code);
+
+ // Finalize and install Turbofan code from a previously run job.
+ static bool FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
+ Isolate* isolate);
+
+ // Finalize and install Maglev code from a previously run job.
+ static bool FinalizeMaglevCompilationJob(maglev::MaglevCompilationJob* job,
+ Isolate* isolate);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -222,20 +236,6 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
Handle<Script> script,
IsolateT* isolate);
-
- // ===========================================================================
- // The following family of methods provides support for OSR. Code generated
- // for entry via OSR might not be suitable for normal entry, hence will be
- // returned directly to the caller.
- //
- // Please note this interface is the only part dealing with {Code} objects
- // directly. Other methods are agnostic to {Code} and can use an interpreter
- // instead of generating JIT code for a function at all.
-
- // Generate and return optimized code for OSR, or empty handle on failure.
- V8_WARN_UNUSED_RESULT static MaybeHandle<CodeT> GetOptimizedCodeForOSR(
- Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame);
};
// A base class for compilation jobs intended to run concurrent to the main
@@ -364,24 +364,48 @@ class UnoptimizedCompilationJob : public CompilationJob {
// Each of the three phases can either fail or succeed.
class OptimizedCompilationJob : public CompilationJob {
public:
- OptimizedCompilationJob(OptimizedCompilationInfo* compilation_info,
- const char* compiler_name,
- State initial_state = State::kReadyToPrepare)
- : CompilationJob(initial_state),
- compilation_info_(compilation_info),
- compiler_name_(compiler_name) {}
+ OptimizedCompilationJob(const char* compiler_name, State initial_state)
+ : CompilationJob(initial_state), compiler_name_(compiler_name) {}
// Prepare the compile job. Must be called on the main thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status PrepareJob(Isolate* isolate);
- // Executes the compile job. Can be called on a background thread if
- // can_execute_on_background_thread() returns true.
+ // Executes the compile job. Can be called on a background thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status
ExecuteJob(RuntimeCallStats* stats, LocalIsolate* local_isolate = nullptr);
// Finalizes the compile job. Must be called on the main thread.
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate* isolate);
+ const char* compiler_name() const { return compiler_name_; }
+
+ protected:
+ // Overridden by the actual implementation.
+ virtual Status PrepareJobImpl(Isolate* isolate) = 0;
+ virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
+ LocalIsolate* local_heap) = 0;
+ virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
+
+ base::TimeDelta time_taken_to_prepare_;
+ base::TimeDelta time_taken_to_execute_;
+ base::TimeDelta time_taken_to_finalize_;
+
+ private:
+ const char* const compiler_name_;
+};
+
+// Thin wrapper to split off Turbofan-specific parts.
+class TurbofanCompilationJob : public OptimizedCompilationJob {
+ public:
+ TurbofanCompilationJob(OptimizedCompilationInfo* compilation_info,
+ State initial_state)
+ : OptimizedCompilationJob("Turbofan", initial_state),
+ compilation_info_(compilation_info) {}
+
+ OptimizedCompilationInfo* compilation_info() const {
+ return compilation_info_;
+ }
+
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
Status RetryOptimization(BailoutReason reason);
@@ -390,28 +414,12 @@ class OptimizedCompilationJob : public CompilationJob {
// Should only be called on optimization compilation jobs.
Status AbortOptimization(BailoutReason reason);
- enum CompilationMode { kConcurrent, kSynchronous };
- void RecordCompilationStats(CompilationMode mode, Isolate* isolate) const;
+ void RecordCompilationStats(ConcurrencyMode mode, Isolate* isolate) const;
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Isolate* isolate) const;
- OptimizedCompilationInfo* compilation_info() const {
- return compilation_info_;
- }
-
- protected:
- // Overridden by the actual implementation.
- virtual Status PrepareJobImpl(Isolate* isolate) = 0;
- virtual Status ExecuteJobImpl(RuntimeCallStats* stats,
- LocalIsolate* local_heap) = 0;
- virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
-
private:
- OptimizedCompilationInfo* compilation_info_;
- base::TimeDelta time_taken_to_prepare_;
- base::TimeDelta time_taken_to_execute_;
- base::TimeDelta time_taken_to_finalize_;
- const char* compiler_name_;
+ OptimizedCompilationInfo* const compilation_info_;
};
class FinalizeUnoptimizedCompilationData {
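To make the new three-phase split concrete, here is a minimal sketch of a job built on the refactored base class; the class name and empty bodies are illustrative only, the real Turbofan and Maglev jobs live elsewhere in the tree.

class ExampleCompilationJob final : public OptimizedCompilationJob {
 public:
  ExampleCompilationJob()
      : OptimizedCompilationJob("ExampleCompiler", State::kReadyToPrepare) {}

 protected:
  // Main thread: allocate handles and build the compiler's input.
  Status PrepareJobImpl(Isolate* isolate) override { return SUCCEEDED; }
  // Background thread: the actual optimization work.
  Status ExecuteJobImpl(RuntimeCallStats* stats,
                        LocalIsolate* local_isolate) override {
    return SUCCEEDED;
  }
  // Main thread: install the generated code.
  Status FinalizeJobImpl(Isolate* isolate) override { return SUCCEEDED; }
};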
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 150ffd6608..2755cd3e22 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -2033,9 +2033,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
ASM_CODE_COMMENT(this);
CallBuiltin(target);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index 081614e9c4..ccee1823d2 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -114,7 +114,6 @@ namespace internal {
V(StoreGlobalWithVector) \
V(StoreTransition) \
V(StoreWithVector) \
- V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
IF_TSAN(V, TSANStore) \
@@ -1553,19 +1552,6 @@ class BinarySmiOp_BaselineDescriptor
static constexpr inline auto registers();
};
-// This desciptor is shared among String.p.charAt/charCodeAt/codePointAt
-// as they all have the same interface.
-class StringAtDescriptor final
- : public StaticCallInterfaceDescriptor<StringAtDescriptor> {
- public:
- DEFINE_PARAMETERS(kReceiver, kPosition)
- // TODO(turbofan): Return untagged value here.
- DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedSigned(), // result 1
- MachineType::AnyTagged(), // kReceiver
- MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtDescriptor)
-};
-
class StringAtAsStringDescriptor final
: public StaticCallInterfaceDescriptor<StringAtAsStringDescriptor> {
public:
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
index d685aaafdd..11acc7c4d2 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -4087,9 +4087,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t7);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
index 734e7cf931..23e99b1c2f 100644
--- a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -417,6 +417,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ slli_d(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ add_w(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
@@ -998,18 +1010,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- slli_d(dst, src, 32);
- } else {
- DCHECK(SmiValuesAre31Bits());
- add_w(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
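The SmiTag overloads moved from MacroAssembler into TurboAssembler here (and on the other ports below) all emit the same arithmetic; a rough C++ sketch of what the generated code computes, assuming kSmiTag == 0:

int64_t SmiTagValue(int64_t value, bool smi_values_are_32_bits) {
  return smi_values_are_32_bits
             ? value << 32   // payload kept in the upper 32 bits (slli_d by 32)
             : value << 1;   // payload doubled, i.e. shifted left by one (add_w dst, src, src)
}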
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 338c0debf6..b911fb9bfb 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -5571,9 +5571,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index f2ed9786c6..26f04401b9 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -29,6 +29,13 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+// Currently, MIPS uses only even floating-point registers, except
+// for C function parameter registers.
+#define DOUBLE_USE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
+ V(f14) V(f15) V(f16) V(f18) V(f20) V(f22) V(f24) V(f26) \
+ V(f28) V(f30)
+
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS(V) \
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 46be9ee787..d9eb08e1d1 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -6114,9 +6114,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t9);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index 43f28ac40e..edcb8cda37 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -482,6 +482,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ dsll32(dst, src, 0);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Addu(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
@@ -1184,18 +1196,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- dsll32(dst, src, 0);
- } else {
- DCHECK(SmiValuesAre31Bits());
- Addu(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 6c9980f50d..00feb1c01c 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -29,6 +29,13 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+// Currently, MIPS64 uses only even floating-point registers, except
+// for C function parameter registers.
+#define DOUBLE_USE_REGISTERS(V) \
+ V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
+ V(f14) V(f15) V(f16) V(f17) V(f18) V(f19) V(f20) V(f22) \
+ V(f24) V(f26) V(f28) V(f30)
+
#define FLOAT_REGISTERS DOUBLE_REGISTERS
#define SIMD128_REGISTERS(V) \
V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 7678298ab3..f699f2a80d 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -73,9 +73,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_analyze_environment_liveness) {
set_analyze_environment_liveness();
}
- if (FLAG_function_context_specialization) {
- set_function_context_specializing();
- }
if (FLAG_turbo_splitting) set_splitting();
break;
case CodeKind::BYTECODE_HANDLER:
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 9543f5f4b1..89bf6c3a3b 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -76,7 +76,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#else
base::CPU cpu;
if (cpu.part() == base::CPU::kPPCPower10) {
+// IBMi does not yet support prefixed instructions introduced on Power10.
+// Run in P9 mode until the OS adds support.
+#if defined(__PASE__)
+ supported_ |= (1u << PPC_9_PLUS);
+#else
supported_ |= (1u << PPC_10_PLUS);
+#endif
} else if (cpu.part() == base::CPU::kPPCPower9) {
supported_ |= (1u << PPC_9_PLUS);
} else if (cpu.part() == base::CPU::kPPCPower8) {
@@ -1135,6 +1141,110 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
}
#endif
+// Prefixed instructions.
+#define GENERATE_PREFIX_SUFFIX_BITS(immediate, prefix, suffix) \
+ CHECK(is_int34(immediate)); \
+ int32_t prefix = \
+ SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask); /* 18 bits.*/ \
+ int16_t suffix = immediate & kImm16Mask; /* 16 bits.*/ \
+ DCHECK(is_int18(prefix));
+
+void Assembler::paddi(Register dst, Register src, const Operand& imm) {
+ CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
+ DCHECK(src != r0); // use pli instead to show intent.
+ intptr_t immediate = imm.immediate();
+ GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ addi(dst, src, Operand(lo));
+}
+
+void Assembler::pli(Register dst, const Operand& imm) {
+ CHECK(CpuFeatures::IsSupported(PPC_10_PLUS));
+ intptr_t immediate = imm.immediate();
+ GENERATE_PREFIX_SUFFIX_BITS(immediate, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ li(dst, Operand(lo));
+}
+
+void Assembler::psubi(Register dst, Register src, const Operand& imm) {
+ paddi(dst, src, Operand(-(imm.immediate())));
+}
+
+void Assembler::plbz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lbz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plhz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lhz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plha(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lha(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plwz(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lwz(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plwa(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_8ls(Operand(hi));
+ emit(PPLWA | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
+}
+
+void Assembler::pld(Register dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_8ls(Operand(hi));
+ emit(PPLD | dst.code() * B21 | src.ra().code() * B16 | (lo & kImm16Mask));
+}
+
+void Assembler::plfs(DoubleRegister dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lfs(dst, MemOperand(src.ra(), lo));
+}
+
+void Assembler::plfd(DoubleRegister dst, const MemOperand& src) {
+ DCHECK(src.ra_ != r0);
+ int64_t offset = src.offset();
+ GENERATE_PREFIX_SUFFIX_BITS(offset, hi, lo)
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ pload_store_mls(Operand(hi));
+ lfd(dst, MemOperand(src.ra(), lo));
+}
+#undef GENERATE_PREFIX_SUFFIX_BITS
+
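GENERATE_PREFIX_SUFFIX_BITS splits a signed 34-bit immediate into an 18-bit high field, carried by the prefix word (pload_store_mls / pload_store_8ls), and a 16-bit low field, carried by the suffix instruction. A worked example, purely illustrative:

// immediate = 0x1'2345'6789 (a valid signed 34-bit value)
//   hi = SIGN_EXT_IMM18((immediate >> 16) & kImm18Mask) == 0x12345   // prefix field
//   lo = immediate & kImm16Mask                         == 0x6789    // suffix field
// Concatenating the raw fields, (0x12345 << 16) | 0x6789, reproduces the value.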
int Assembler::instructions_required_for_mov(Register dst,
const Operand& src) const {
bool canOptimize =
@@ -1162,7 +1272,9 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
#else
bool allowOverflow = !(canOptimize || dst == r0);
#endif
- if (canOptimize && is_int16(value)) {
+ if (canOptimize &&
+ (is_int16(value) ||
+ (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))) {
// Prefer a single-instruction load-immediate.
return false;
}
@@ -1209,7 +1321,10 @@ void Assembler::mov(Register dst, const Operand& src) {
bool canOptimize;
canOptimize =
- !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
+ !(relocatable ||
+ (is_trampoline_pool_blocked() &&
+ (!is_int16(value) ||
+ !(CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)))));
if (!src.IsHeapObjectRequest() &&
use_constant_pool_for_mov(dst, src, canOptimize)) {
@@ -1239,6 +1354,8 @@ void Assembler::mov(Register dst, const Operand& src) {
if (canOptimize) {
if (is_int16(value)) {
li(dst, Operand(value));
+ } else if (CpuFeatures::IsSupported(PPC_10_PLUS) && is_int34(value)) {
+ pli(dst, Operand(value));
} else {
uint16_t u16;
#if V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index fe21a1c8ad..b5b1899852 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -604,6 +604,22 @@ class Assembler : public AssemblerBase {
PPC_VC_OPCODE_LIST(DECLARE_PPC_VC_INSTRUCTIONS)
#undef DECLARE_PPC_VC_INSTRUCTIONS
+#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00(name, instr_name, instr_value) \
+ inline void name(const Operand& imm, const PRBit pr = LeavePR) { \
+ prefix_form(instr_name, imm, pr); \
+ }
+#define DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10(name, instr_name, instr_value) \
+ inline void name(const Operand& imm, const PRBit pr = LeavePR) { \
+ prefix_form(instr_name, imm, pr); \
+ }
+ inline void prefix_form(Instr instr, const Operand& imm, int pr) {
+ emit_prefix(instr | pr * B20 | (imm.immediate() & kImm18Mask));
+ }
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00)
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10)
+#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_00
+#undef DECLARE_PPC_PREFIX_INSTRUCTIONS_TYPE_10
+
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
@@ -1119,6 +1135,19 @@ class Assembler : public AssemblerBase {
void stxvx(const Simd128Register rt, const MemOperand& dst);
void xxspltib(const Simd128Register rt, const Operand& imm);
+  // Prefixed instructions.
+ void paddi(Register dst, Register src, const Operand& imm);
+ void pli(Register dst, const Operand& imm);
+ void psubi(Register dst, Register src, const Operand& imm);
+ void plbz(Register dst, const MemOperand& src);
+ void plhz(Register dst, const MemOperand& src);
+ void plha(Register dst, const MemOperand& src);
+ void plwz(Register dst, const MemOperand& src);
+ void plwa(Register dst, const MemOperand& src);
+ void pld(Register dst, const MemOperand& src);
+ void plfs(DoubleRegister dst, const MemOperand& src);
+ void plfd(DoubleRegister dst, const MemOperand& src);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1403,6 +1432,21 @@ class Assembler : public AssemblerBase {
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}
+
+ void emit_prefix(Instr x) {
+ // Prefixed instructions cannot cross 64-byte boundaries. Add a nop if the
+  // boundary will be crossed midway.
+ // Code is set to be 64-byte aligned on PPC64 after relocation (look for
+ // kCodeAlignment). We use pc_offset() instead of pc_ as current pc_
+ // alignment could be different after relocation.
+ if (((pc_offset() + sizeof(Instr)) & 63) == 0) {
+ nop();
+ }
+ // Do not emit trampoline pool in between prefix and suffix.
+ CHECK(is_trampoline_pool_blocked());
+ emit(x);
+ }
+
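The boundary check in emit_prefix can be read off two concrete offsets, assuming kInstrSize == 4 and the 64-byte code alignment mentioned in the comment:

// pc_offset() == 56: prefix at 56..59, suffix at 60..63, same 64-byte block, no nop.
// pc_offset() == 60: ((60 + 4) & 63) == 0, so the suffix would land in the next block;
//                    a nop is emitted first and the prefix/suffix pair starts at 64.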
void TrackBranch() {
DCHECK(!trampoline_emitted_);
int count = tracked_branch_count_++;
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 37593003e1..74a1bfc89f 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -92,12 +92,18 @@ constexpr int kRootRegisterBias = 128;
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+// sign-extend the least significant 18-bits of value <imm>
+#define SIGN_EXT_IMM18(imm) ((static_cast<int>(imm) << 14) >> 14)
+
// sign-extend the least significant 22-bits of value <imm>
#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)
// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
+// sign-extend the least significant 34-bits of prefix+suffix value <imm>
+#define SIGN_EXT_IMM34(imm) ((static_cast<int64_t>(imm) << 30) >> 30)
+
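Like the existing 16/22/26-bit helpers, the new macro sign-extends by shifting the field up so its top bit becomes the integer's sign bit, then arithmetic-shifting back down; two illustrative evaluations:

// SIGN_EXT_IMM34(0x3'FFFF'FFFF) == -1              (bit 33 set, so the result is negative)
// SIGN_EXT_IMM34(0x1'2345'6789) == 0x1'2345'6789   (bit 33 clear, value unchanged)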
// -----------------------------------------------------------------------------
// Conditions.
@@ -2672,49 +2678,60 @@ immediate-specified index */ \
/* System Call */ \
V(sc, SC, 0x44000002)
-#define PPC_OPCODE_LIST(V) \
- PPC_X_OPCODE_LIST(V) \
- PPC_X_OPCODE_EH_S_FORM_LIST(V) \
- PPC_XO_OPCODE_LIST(V) \
- PPC_DS_OPCODE_LIST(V) \
- PPC_DQ_OPCODE_LIST(V) \
- PPC_MDS_OPCODE_LIST(V) \
- PPC_MD_OPCODE_LIST(V) \
- PPC_XS_OPCODE_LIST(V) \
- PPC_D_OPCODE_LIST(V) \
- PPC_I_OPCODE_LIST(V) \
- PPC_B_OPCODE_LIST(V) \
- PPC_XL_OPCODE_LIST(V) \
- PPC_A_OPCODE_LIST(V) \
- PPC_XFX_OPCODE_LIST(V) \
- PPC_M_OPCODE_LIST(V) \
- PPC_SC_OPCODE_LIST(V) \
- PPC_Z23_OPCODE_LIST(V) \
- PPC_Z22_OPCODE_LIST(V) \
- PPC_EVX_OPCODE_LIST(V) \
- PPC_XFL_OPCODE_LIST(V) \
- PPC_EVS_OPCODE_LIST(V) \
- PPC_VX_OPCODE_LIST(V) \
- PPC_VA_OPCODE_LIST(V) \
- PPC_VC_OPCODE_LIST(V) \
- PPC_XX1_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_LIST(V) \
- PPC_XX3_OPCODE_VECTOR_LIST(V) \
- PPC_XX3_OPCODE_SCALAR_LIST(V) \
- PPC_XX4_OPCODE_LIST(V)
+#define PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
+ V(pload_store_8ls, PLOAD_STORE_8LS, 0x4000000) \
+ V(pplwa, PPLWA, 0xA4000000) \
+ V(ppld, PPLD, 0xE4000000)
+
+#define PPC_PREFIX_OPCODE_TYPE_10_LIST(V) \
+ V(pload_store_mls, PLOAD_STORE_MLS, 0x6000000)
+
+#define PPC_OPCODE_LIST(V) \
+ PPC_X_OPCODE_LIST(V) \
+ PPC_X_OPCODE_EH_S_FORM_LIST(V) \
+ PPC_XO_OPCODE_LIST(V) \
+ PPC_DS_OPCODE_LIST(V) \
+ PPC_DQ_OPCODE_LIST(V) \
+ PPC_MDS_OPCODE_LIST(V) \
+ PPC_MD_OPCODE_LIST(V) \
+ PPC_XS_OPCODE_LIST(V) \
+ PPC_D_OPCODE_LIST(V) \
+ PPC_I_OPCODE_LIST(V) \
+ PPC_B_OPCODE_LIST(V) \
+ PPC_XL_OPCODE_LIST(V) \
+ PPC_A_OPCODE_LIST(V) \
+ PPC_XFX_OPCODE_LIST(V) \
+ PPC_M_OPCODE_LIST(V) \
+ PPC_SC_OPCODE_LIST(V) \
+ PPC_Z23_OPCODE_LIST(V) \
+ PPC_Z22_OPCODE_LIST(V) \
+ PPC_EVX_OPCODE_LIST(V) \
+ PPC_XFL_OPCODE_LIST(V) \
+ PPC_EVS_OPCODE_LIST(V) \
+ PPC_VX_OPCODE_LIST(V) \
+ PPC_VA_OPCODE_LIST(V) \
+ PPC_VC_OPCODE_LIST(V) \
+ PPC_XX1_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX3_OPCODE_VECTOR_LIST(V) \
+ PPC_XX3_OPCODE_SCALAR_LIST(V) \
+ PPC_XX4_OPCODE_LIST(V) \
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(V) \
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(V)
enum Opcode : uint32_t {
#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
opcode_name = opcode_value,
PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
- EXT0 = 0x10000000, // Extended code set 0
- EXT1 = 0x4C000000, // Extended code set 1
- EXT2 = 0x7C000000, // Extended code set 2
- EXT3 = 0xEC000000, // Extended code set 3
- EXT4 = 0xFC000000, // Extended code set 4
- EXT5 = 0x78000000, // Extended code set 5 - 64bit only
- EXT6 = 0xF0000000, // Extended code set 6
+ EXTP = 0x4000000, // Extended code set prefixed
+ EXT0 = 0x10000000, // Extended code set 0
+ EXT1 = 0x4C000000, // Extended code set 1
+ EXT2 = 0x7C000000, // Extended code set 2
+ EXT3 = 0xEC000000, // Extended code set 3
+ EXT4 = 0xFC000000, // Extended code set 4
+ EXT5 = 0x78000000, // Extended code set 5 - 64bit only
+ EXT6 = 0xF0000000, // Extended code set 6
};
// Instruction encoding bits and masks.
@@ -2752,6 +2769,7 @@ enum {
kImm24Mask = (1 << 24) - 1,
kOff16Mask = (1 << 16) - 1,
kImm16Mask = (1 << 16) - 1,
+ kImm18Mask = (1 << 18) - 1,
kImm22Mask = (1 << 22) - 1,
kImm26Mask = (1 << 26) - 1,
kBOfieldMask = 0x1f << 21,
@@ -2795,6 +2813,9 @@ enum LKBit { // Bit 0
LeaveLK = 0 // No action
};
+// Prefixed R bit.
+enum PRBit { SetPR = 1, LeavePR = 0 };
+
enum BOfield { // Bits 25-21
DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
@@ -2968,12 +2989,28 @@ class Instruction {
inline uint32_t OpcodeField() const {
return static_cast<Opcode>(BitField(31, 26));
}
+ inline uint32_t PrefixOpcodeField() const {
+ return static_cast<Opcode>(BitField(31, 25));
+ }
#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:
inline Opcode OpcodeBase() const {
- uint32_t opcode = OpcodeField();
- uint32_t extcode = OpcodeField();
+ uint32_t opcode = PrefixOpcodeField();
+ uint32_t extcode = PrefixOpcodeField();
+ // Check for prefix.
+ switch (opcode) {
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
+ PPC_PREFIX_OPCODE_TYPE_10_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
+ opcode = OpcodeField();
+ extcode = OpcodeField();
+ // Check for suffix.
+ switch (opcode) {
+ PPC_PREFIX_OPCODE_TYPE_00_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
switch (opcode) {
PPC_D_OPCODE_LIST(OPCODE_CASES)
PPC_I_OPCODE_LIST(OPCODE_CASES)
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 6275d14e89..b00ce7f1c5 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -279,6 +279,13 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
bind(&skip);
}
+void TurboAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(this,
+ CommentForOffHeapTrampoline("tail call", builtin));
+ mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Jump(ip);
+}
+
void TurboAssembler::Drop(int count) {
if (count > 0) {
AddS64(sp, sp, Operand(count * kSystemPointerSize), r0);
@@ -620,6 +627,16 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
RecordComment("]");
}
+void TurboAssembler::LoadTaggedSignedField(Register destination,
+ MemOperand field_operand,
+ Register scratch) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedSigned(destination, field_operand);
+ } else {
+ LoadU64(destination, field_operand, scratch);
+ }
+}
+
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register slot_address,
LinkRegisterStatus lr_status,
@@ -3603,6 +3620,19 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ ASM_CODE_COMMENT(this);
+ LoadU64(destination, EntryFromBuiltinAsOperand(builtin));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ ASM_CODE_COMMENT(this);
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::BuiltinEntrySlotOffset(builtin));
+}
+
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
@@ -3707,9 +3737,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 76ed4c2018..db6a53780e 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -50,6 +50,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
using TurboAssemblerBase::TurboAssemblerBase;
void CallBuiltin(Builtin builtin, Condition cond);
+ void TailCallBuiltin(Builtin builtin);
void Popcnt32(Register dst, Register src);
void Popcnt64(Register dst, Register src);
// Converts the integer (untagged smi) in |src| to a double, storing
@@ -707,6 +708,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
void LoadCodeObjectEntry(Register destination, Register code_object);
void CallCodeObject(Register code_object);
void JumpCodeObject(Register code_object,
@@ -768,6 +771,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(Register dst, const MemOperand& src) { LoadU64(dst, src); }
void SmiUntag(Register dst, const MemOperand& src, RCBit rc = LeaveRC,
Register scratch = no_reg);
@@ -788,6 +792,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
SmiUntag(smi);
}
+ // Shift left by kSmiShift
+ void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
+ void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
+ ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
+ }
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -972,6 +982,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
+ void LoadTaggedSignedField(Register destination, MemOperand field_operand,
+ Register scratch);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
@@ -1295,12 +1307,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by kSmiShift
- void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
- void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
- ShiftLeftU64(dst, src, Operand(kSmiShift), rc);
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
diff --git a/deps/v8/src/codegen/register.h b/deps/v8/src/codegen/register.h
index 28dc5981fe..e36e4d1e9a 100644
--- a/deps/v8/src/codegen/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -20,22 +20,14 @@ constexpr bool ShouldPadArguments(int argument_count) {
}
#ifdef DEBUG
-struct CountIfValidRegisterFunctor {
- template <typename RegType>
- constexpr int operator()(int count, RegType reg) const {
- return count + (reg.is_valid() ? 1 : 0);
- }
-};
-
-template <typename RegType, typename... RegTypes,
+template <typename... RegTypes,
// All arguments must be either Register or DoubleRegister.
- typename = typename std::enable_if<
- base::is_same<Register, RegType, RegTypes...>::value ||
- base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
-inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
- int num_different_regs = RegListBase<RegType>{first_reg, regs...}.Count();
- int num_given_regs =
- base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
+ typename = typename std::enable_if_t<
+ std::conjunction_v<std::is_same<Register, RegTypes>...> ||
+ std::conjunction_v<std::is_same<DoubleRegister, RegTypes>...>>>
+inline constexpr bool AreAliased(RegTypes... regs) {
+ int num_different_regs = RegListBase{regs...}.Count();
+ int num_given_regs = (... + (regs.is_valid() ? 1 : 0));
return num_different_regs < num_given_regs;
}
#endif
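The rewritten AreAliased keeps its old meaning: it reports true when some valid register appears more than once in the argument list, with invalid registers ignored by the count. A hedged usage sketch, with dst/src/scratch standing in for whatever registers the target defines:

// DEBUG-only sanity checks a macro-assembler routine might perform.
DCHECK(!AreAliased(dst, src, scratch));  // all distinct, no aliasing
DCHECK(AreAliased(dst, src, dst));       // dst repeated, aliased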
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index d1b4ed2b92..d110e387b4 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -12,7 +12,7 @@
#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/code-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index a5809286ef..c24fb31a7b 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,7 +57,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
-#if (defined CAN_USE_RVV_INSTRUCTIONS) || (defined USE_SIMULATOR)
+#if (defined CAN_USE_RVV_INSTRUCTIONS)
answer |= 1u << RISCV_SIMD;
#endif // def CAN_USE_RVV_INSTRUCTIONS || USE_SIMULATOR
return answer;
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 52bba9f21c..33816db57f 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -4987,9 +4987,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 1b04c73e6a..cb738a26dc 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -862,6 +862,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
VRegister v_scratch);
void Round_d(VRegister dst, VRegister src, Register scratch,
VRegister v_scratch);
+
+ // -------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ // Smi goes to upper 32
+ slli(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ // Smi is shifted left by 1
+ Add32(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
  // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@@ -1231,23 +1249,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register scratch2, Label* stack_overflow,
Label* done = nullptr);
- // -------------------------------------------------------------------------
- // Smi utilities.
-
- void SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (SmiValuesAre32Bits()) {
- // Smi goes to upper 32
- slli(dst, src, 32);
- } else {
- DCHECK(SmiValuesAre31Bits());
- // Smi is shifted left by 1
- Add32(dst, src, src);
- }
- }
-
- void SmiTag(Register reg) { SmiTag(reg, reg); }
-
// Left-shifted from int32 equivalent of Smi.
void SmiScale(Register dst, Register src, int scale) {
if (SmiValuesAre32Bits()) {
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 1037eff0cd..352b002327 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -296,8 +296,9 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
return bytes;
}
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
int bytes = 0;
RegList exclusions = {exclusion1, exclusion2, exclusion3};
@@ -306,18 +307,19 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += list.Count() * kSystemPointerSize;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPushF64OrV128(kCallerSavedDoubles);
+ MultiPushF64OrV128(kCallerSavedDoubles, scratch);
bytes += kStackSavedSavedFPSizeInBytes;
}
return bytes;
}
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
- Register exclusion2, Register exclusion3) {
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) {
- MultiPopF64OrV128(kCallerSavedDoubles);
+ MultiPopF64OrV128(kCallerSavedDoubles, scratch);
bytes += kStackSavedSavedFPSizeInBytes;
}
@@ -667,7 +669,8 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) {
}
}
-void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch,
+ Register location) {
int16_t num_to_push = dregs.Count();
int16_t stack_offset = num_to_push * kSimd128Size;
@@ -676,7 +679,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register location) {
if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
- StoreV128(dreg, MemOperand(location, stack_offset), r0);
+ StoreV128(dreg, MemOperand(location, stack_offset), scratch);
}
}
}
@@ -694,20 +697,21 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) {
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch,
+ Register location) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < Simd128Register::kNumRegisters; i++) {
if ((dregs.bits() & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
- LoadV128(dreg, MemOperand(location, stack_offset), r0);
+ LoadV128(dreg, MemOperand(location, stack_offset), scratch);
stack_offset += kSimd128Size;
}
}
AddS64(location, location, Operand(stack_offset));
}
-void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
+void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
@@ -719,7 +723,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
LoadAndTestP(r1, r1); // If > 0 then simd is available.
ble(&push_doubles, Label::kNear);
// Save vector registers, don't save double registers anymore.
- MultiPushV128(dregs);
+ MultiPushV128(dregs, scratch);
b(&simd_pushed);
bind(&push_doubles);
// Simd not supported, only save double registers.
@@ -730,7 +734,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
bind(&simd_pushed);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
- MultiPushV128(dregs);
+ MultiPushV128(dregs, scratch);
} else {
MultiPushDoubles(dregs);
lay(sp, MemOperand(sp, -(dregs.Count() * kDoubleSize)));
@@ -741,7 +745,8 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs,
#endif
}
-void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
+void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location) {
#if V8_ENABLE_WEBASSEMBLY
bool generating_bultins =
isolate() && isolate()->IsGeneratingEmbeddedBuiltins();
@@ -752,7 +757,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
LoadAndTestP(r1, r1); // If > 0 then simd is available.
ble(&pop_doubles, Label::kNear);
// Pop vector registers, don't pop double registers anymore.
- MultiPopV128(dregs);
+ MultiPopV128(dregs, scratch);
b(&simd_popped);
bind(&pop_doubles);
// Simd not supported, only pop double registers.
@@ -761,7 +766,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register location) {
bind(&simd_popped);
} else {
if (CpuFeatures::SupportsWasmSimd128()) {
- MultiPopV128(dregs);
+ MultiPopV128(dregs, scratch);
} else {
lay(sp, MemOperand(sp, dregs.Count() * kDoubleSize));
MultiPopDoubles(dregs);
@@ -3771,6 +3776,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
}
void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgh(dst, mem);
#else
@@ -3787,6 +3793,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) {
}
void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
#else
@@ -3803,6 +3810,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) {
}
void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) {
+ // TODO(s390x): Add scratch reg
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
#else
@@ -4073,6 +4081,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) {
void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem,
Register scratch) {
+ DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
vl(dst, mem, Condition(0));
} else {
@@ -4102,6 +4111,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) {
void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem,
Register scratch) {
+ DCHECK(scratch != r0);
if (is_uint12(mem.offset())) {
vst(src, mem, Condition(0));
} else {
@@ -4826,9 +4836,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
IsolateData::BuiltinEntrySlotOffset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { stop(); }
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index f5abeb9860..8e89f3d1f9 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -178,11 +178,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MultiPushDoubles(DoubleRegList dregs, Register location = sp);
void MultiPopDoubles(DoubleRegList dregs, Register location = sp);
- void MultiPushV128(DoubleRegList dregs, Register location = sp);
- void MultiPopV128(DoubleRegList dregs, Register location = sp);
+ void MultiPushV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
+ void MultiPopV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
- void MultiPushF64OrV128(DoubleRegList dregs, Register location = sp);
- void MultiPopF64OrV128(DoubleRegList dregs, Register location = sp);
+ void MultiPushF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
+ void MultiPopF64OrV128(DoubleRegList dregs, Register scratch,
+ Register location = sp);
  // Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
@@ -193,13 +197,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
- int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
- int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch,
+ Register exclusion1 = no_reg, Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Load an object from the root table.
@@ -1054,6 +1059,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
SmiUntag(smi);
}
+ // Shift left by kSmiShift
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+ void SmiTag(Register dst, Register src) {
+ ShiftLeftU64(dst, src, Operand(kSmiShift));
+ }
+
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1682,12 +1693,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Smi utilities
- // Shift left by kSmiShift
- void SmiTag(Register reg) { SmiTag(reg, reg); }
- void SmiTag(Register dst, Register src) {
- ShiftLeftU64(dst, src, Operand(kSmiShift));
- }
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 93ec1ae54f..7b6ae44ead 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -67,7 +67,11 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#if V8_TARGET_ARCH_IA32
and_(dst, src);
#elif V8_TARGET_ARCH_X64
- andq(dst, src);
+ if (is_uint32(src.value())) {
+ andl(dst, src);
+ } else {
+ andq(dst, src);
+ }
#else
#error Unsupported target architecture.
#endif
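The new branch is sound because andl zero-extends its 32-bit result: for an immediate that passes is_uint32, the final register value matches the 64-bit andq while the encoding drops the REX.W prefix. A worked example, illustrative only:

// dst = 0xFFFF'FFFF'1234'5678, src = Immediate(0xFF)   // is_uint32(0xFF) holds
//   andl(dst, src) -> 0x0000'0000'0000'0078            // 32-bit AND zero-extends its result
//   andq(dst, src) -> 0x0000'0000'0000'0078            // same value, longer encoding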
diff --git a/deps/v8/src/codegen/tnode.h b/deps/v8/src/codegen/tnode.h
index bffaa5c326..1f2a3dcfd9 100644
--- a/deps/v8/src/codegen/tnode.h
+++ b/deps/v8/src/codegen/tnode.h
@@ -185,7 +185,7 @@ struct MachineRepresentationOf {
// If T defines kMachineType, then we take the machine representation from
// there.
template <class T>
-struct MachineRepresentationOf<T, base::void_t<decltype(T::kMachineType)>> {
+struct MachineRepresentationOf<T, std::void_t<decltype(T::kMachineType)>> {
static const MachineRepresentation value = T::kMachineType.representation();
};
template <class T>
diff --git a/deps/v8/src/codegen/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 24a237c16a..e12be0d567 100644
--- a/deps/v8/src/codegen/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -10,7 +10,7 @@
#include "src/common/globals.h"
#include "src/execution/isolate-data.h"
#include "src/execution/isolate-inl.h"
-#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index df15db18cc..a29357e78b 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3372,6 +3372,28 @@ void Assembler::haddps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
+void Assembler::cmpeqss(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x00); // EQ == 0
+}
+
+void Assembler::cmpeqsd(XMMRegister dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(0x00); // EQ == 0
+}
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3389,6 +3411,13 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundss(XMMRegister dst, Operand src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0A);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0B);
@@ -3396,6 +3425,13 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
+void Assembler::roundsd(XMMRegister dst, Operand src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x0B);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
void Assembler::roundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
sse4_instr(dst, src, 0x66, 0x0F, 0x3A, 0x08);
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index 1d2d07ffdd..c6c2e7ed41 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -1288,6 +1288,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void haddps(XMMRegister dst, XMMRegister src);
void haddps(XMMRegister dst, Operand src);
+ void cmpeqsd(XMMRegister dst, XMMRegister src);
+ void cmpeqss(XMMRegister dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
@@ -1309,7 +1311,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pinsrq(XMMRegister dst, Operand src, uint8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundss(XMMRegister dst, Operand src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void roundsd(XMMRegister dst, Operand src, RoundingMode mode);
void roundps(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundpd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1556,11 +1560,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundss(XMMRegister dst, XMMRegister src1, Operand src2,
+ RoundingMode mode) {
+ vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
RoundingMode mode) {
vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
}
+ void vroundsd(XMMRegister dst, XMMRegister src1, Operand src2,
+ RoundingMode mode) {
+ vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundps(XMMRegister dst, XMMRegister src, RoundingMode mode) {
vinstr(0x08, dst, xmm0, src, k66, k0F3A, kWIG);
emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
@@ -1625,45 +1639,76 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
vpd(0x50, idst, xmm0, src);
}
void vpmovmskb(Register dst, XMMRegister src);
+ void vcmpeqss(XMMRegister dst, XMMRegister src) {
+ vss(0xC2, dst, dst, src);
+ emit(0x00); // EQ == 0
+ }
+ void vcmpeqsd(XMMRegister dst, XMMRegister src) {
+ vsd(0xC2, dst, dst, src);
+ emit(0x00); // EQ == 0
+ }
void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmpps(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vps(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmpps(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
+ void vcmppd(YMMRegister dst, YMMRegister src1, YMMRegister src2, int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
vpd(0xC2, dst, src1, src2);
emit(cmp);
}
-
-#define AVX_CMP_P(instr, imm8) \
- void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmpps(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
- vcmppd(dst, src1, src2, imm8); \
- } \
- void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) { \
- vcmppd(dst, src1, src2, imm8); \
- }
-
- AVX_CMP_P(vcmpeq, 0x0)
- AVX_CMP_P(vcmplt, 0x1)
- AVX_CMP_P(vcmple, 0x2)
- AVX_CMP_P(vcmpunord, 0x3)
- AVX_CMP_P(vcmpneq, 0x4)
- AVX_CMP_P(vcmpnlt, 0x5)
- AVX_CMP_P(vcmpnle, 0x6)
- AVX_CMP_P(vcmpge, 0xd)
+ void vcmppd(YMMRegister dst, YMMRegister src1, Operand src2, int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+#define AVX_CMP_P(instr, imm8, SIMDRegister) \
+ void instr##ps(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##ps(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(SIMDRegister dst, SIMDRegister src1, SIMDRegister src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(SIMDRegister dst, SIMDRegister src1, Operand src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ }
+
+ AVX_CMP_P(vcmpeq, 0x0, XMMRegister)
+ AVX_CMP_P(vcmpeq, 0x0, YMMRegister)
+ AVX_CMP_P(vcmplt, 0x1, XMMRegister)
+ AVX_CMP_P(vcmplt, 0x1, YMMRegister)
+ AVX_CMP_P(vcmple, 0x2, XMMRegister)
+ AVX_CMP_P(vcmple, 0x2, YMMRegister)
+ AVX_CMP_P(vcmpunord, 0x3, XMMRegister)
+ AVX_CMP_P(vcmpunord, 0x3, YMMRegister)
+ AVX_CMP_P(vcmpneq, 0x4, XMMRegister)
+ AVX_CMP_P(vcmpneq, 0x4, YMMRegister)
+ AVX_CMP_P(vcmpnlt, 0x5, XMMRegister)
+ AVX_CMP_P(vcmpnlt, 0x5, YMMRegister)
+ AVX_CMP_P(vcmpnle, 0x6, XMMRegister)
+ AVX_CMP_P(vcmpnle, 0x6, YMMRegister)
+ AVX_CMP_P(vcmpge, 0xd, XMMRegister)
+ AVX_CMP_P(vcmpge, 0xd, YMMRegister)
#undef AVX_CMP_P
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 4e28e4df66..ebaa40be0c 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -1215,6 +1215,23 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
+void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vcmpeqss(dst, src);
+ } else {
+ cmpeqss(dst, src);
+ }
+}
+
+void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vcmpeqsd(dst, src);
+ } else {
+ cmpeqsd(dst, src);
+ }
+}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -3099,6 +3116,23 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+// 3. if it is not zero then it jumps to the builtin.
+void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ LoadTaggedPointerField(scratch,
+ Operand(kJavaScriptCallCodeStartRegister, offset));
+ testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, not_zero);
+}
+
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
@@ -3108,9 +3142,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
// performance tuning which emits a different instruction sequence.
call(EntryFromBuiltinAsOperand(target));
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
- (kind == DeoptimizeKind::kLazy)
- ? Deoptimizer::kLazyDeoptExitSize
- : Deoptimizer::kNonLazyDeoptExitSize);
+ (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kEagerDeoptExitSize);
}
void TurboAssembler::Trap() { int3(); }
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index f1aba1355c..1f07fdcf2b 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -169,6 +169,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, Operand src);
+ void Cmpeqss(XMMRegister dst, XMMRegister src);
+ void Cmpeqsd(XMMRegister dst, XMMRegister src);
+
void PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8);
void Pextrq(Register dst, XMMRegister src, int8_t imm8);
@@ -412,6 +415,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc = always);
+ void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);