Diffstat (limited to 'chromium/v8/src/objects/code.cc'):
 chromium/v8/src/objects/code.cc | 124 ++++++--------------
 1 file changed, 38 insertions(+), 86 deletions(-)
diff --git a/chromium/v8/src/objects/code.cc b/chromium/v8/src/objects/code.cc
index 7268f001ce9..5b4ad4406d0 100644
--- a/chromium/v8/src/objects/code.cc
+++ b/chromium/v8/src/objects/code.cc
@@ -145,22 +145,42 @@ int Code::OffHeapInstructionSize() const {
return d.InstructionSizeOfBuiltin(builtin_index());
}
+namespace {
+
+// Helper function for getting an EmbeddedData that can handle un-embedded
+// builtins when short builtin calls are enabled.
+inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(Code code) {
+#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
+ // GetIsolateFromWritableObject(*this) works for both read-only and writable
+ // objects when pointer compression is enabled with a per-Isolate cage.
+ return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
+#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ // When pointer compression is enabled with a shared cage, there is also a
+ // shared CodeRange. When short builtin calls are enabled, there is a single
+ // copy of the re-embedded builtins in the shared CodeRange, so use that if
+ // it's present.
+ if (FLAG_jitless) return EmbeddedData::FromBlob();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ return (code_range && code_range->embedded_blob_code_copy() != nullptr)
+ ? EmbeddedData::FromBlob(code_range)
+ : EmbeddedData::FromBlob();
+#else
+ // Otherwise there is a single copy of the blob across all Isolates, use the
+ // global atomic variables.
+ return EmbeddedData::FromBlob();
+#endif
+}
+
+} // namespace
+
Address Code::OffHeapInstructionStart() const {
DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- // TODO(11527): pass Isolate as an argument.
- // GetIsolateFromWritableObject(*this) works for both read-only and writable
- // objects here because short builtin calls feature requires pointer
- // compression.
- // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
- // because if the short builtin calls wasn't actually enabled because of not
- // enough memory, the FromBlob(isolate) would still be the correct one to use.
- EmbeddedData d =
- FLAG_short_builtin_calls
- ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
- : EmbeddedData::FromBlob();
+
+ // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index());
}
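
A minimal sketch of the fallback that the new helper performs in the shared-cage configuration, with placeholder types standing in for V8's EmbeddedData and CodeRange (hypothetical simplifications, not the real V8 API): prefer the re-embedded builtins copy in the shared CodeRange when one exists, otherwise fall back to the single process-wide blob.

#include <cstdio>

// Placeholder types standing in for V8's EmbeddedData and CodeRange;
// hypothetical simplifications for illustration only.
struct Blob { const char* source; };

struct SharedCodeRange {
  const Blob* embedded_blob_code_copy;  // non-null once builtins are re-embedded
};

static Blob g_process_wide_blob{"process-wide embedded blob"};

// Mirrors the V8_COMPRESS_POINTERS_IN_SHARED_CAGE branch above: use the
// re-embedded builtins copy in the shared CodeRange if present, otherwise
// fall back to the single process-wide blob.
const Blob* SelectBlob(const SharedCodeRange* code_range) {
  if (code_range != nullptr && code_range->embedded_blob_code_copy != nullptr) {
    return code_range->embedded_blob_code_copy;
  }
  return &g_process_wide_blob;
}

int main() {
  Blob remapped{"re-embedded copy in the shared CodeRange"};
  SharedCodeRange range{&remapped};
  std::printf("%s\n", SelectBlob(&range)->source);   // prefers the remapped copy
  std::printf("%s\n", SelectBlob(nullptr)->source);  // falls back to the global blob
}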
@@ -169,17 +189,9 @@ Address Code::OffHeapInstructionEnd() const {
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- // TODO(11527): pass Isolate as an argument.
- // GetIsolateFromWritableObject(*this) works for both read-only and writable
- // objects here because short builtin calls feature requires pointer
- // compression.
- // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
- // because if the short builtin calls wasn't actually enabled because of not
- // enough memory, the FromBlob(isolate) would still be the correct one to use.
- EmbeddedData d =
- FLAG_short_builtin_calls
- ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
- : EmbeddedData::FromBlob();
+
+ // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
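
Taken together, the two accessors give the half-open range [start, start + size) of a builtin's off-heap instructions, since OffHeapInstructionEnd() is OffHeapInstructionStart() plus the builtin's instruction size. A small sketch of a containment check built on that invariant (a hypothetical helper, not introduced by this patch):

#include <cstdint>

using Address = uintptr_t;

// Hypothetical helper, for illustration: the off-heap instructions occupy
// the half-open interval [start, end), so a pc-containment test reduces to
// two comparisons.
inline bool PcWithinOffHeapInstructions(Address pc, Address start, Address end) {
  return start <= pc && pc < end;
}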
@@ -299,11 +311,12 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
- defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_MIPS64)
return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
- defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
+ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_RISCV64)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
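
The architectures in this second branch (now including RISCV64) do not require an empty relocation table the way the PPC/PPC64/MIPS64 branch does; per the comment above, they tolerate relative builtin-to-builtin jumps in the snapshot. A simplified sketch of that filtering pattern, with placeholder types rather than V8's real RelocIterator API:

#include <vector>

// Simplified stand-ins for V8's relocation machinery; hypothetical types
// for illustration only.
enum class RelocMode { kRelativeCodeTarget, kOther };
struct RelocEntry {
  RelocMode mode;
  bool targets_builtin;  // whether the jump target is itself a builtin
};

// The PPC/PPC64/MIPS64 branch is simply "no relocation entries at all"
// (RelocIterator(*this, kModeMask).done()). The branch sketched here instead
// accepts relative builtin-to-builtin jumps and rejects everything else.
bool IsIsolateIndependentSketch(const std::vector<RelocEntry>& entries) {
  for (const RelocEntry& e : entries) {
    if (e.mode == RelocMode::kRelativeCodeTarget && e.targets_builtin) {
      continue;  // allowed: fixed up later, per the comment above
    }
    return false;  // any other relocation ties the code to its Isolate
  }
  return true;
}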
@@ -325,67 +338,6 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
return true;
}
-// Multiple native contexts live on the same heap, and V8 currently
-// draws no clear distinction between native-context-dependent and
-// independent objects. A good guideline is "objects embedded into
-// bytecode are nc-independent", since bytecode is shared between
-// native contexts. Among others, this is the case for ScopeInfo,
-// SharedFunctionInfo, String, etc.
-bool Code::IsNativeContextIndependent(Isolate* isolate) {
- static constexpr int kModeMask =
- RelocInfo::AllRealModesMask() &
- ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
- ~RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
- ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
- ~RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) &
- ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
- STATIC_ASSERT(kModeMask ==
- (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::DATA_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
-
- bool is_independent = true;
- for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
- if (RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode())) {
- HeapObject o = it.rinfo()->target_object();
- // TODO(jgruber,v8:8888): Extend this with further NCI objects,
- // and define a more systematic
- // IsNativeContextIndependent<T>() predicate.
- if (o.IsString()) continue;
- if (o.IsScopeInfo()) continue;
- if (o.IsHeapNumber()) continue;
- if (o.IsBigInt()) continue;
- if (o.IsSharedFunctionInfo()) continue;
- if (o.IsArrayBoilerplateDescription()) continue;
- if (o.IsObjectBoilerplateDescription()) continue;
- if (o.IsTemplateObjectDescription()) continue;
- if (o.IsFixedArray()) {
- // Some uses of FixedArray are valid.
- // 1. Passed as arg to %DeclareGlobals, contains only strings
- // and SFIs.
- // 2. Passed as arg to %DefineClass. No well defined contents.
- // .. ?
- // TODO(jgruber): Consider assigning dedicated instance
- // types instead of assuming fixed arrays are okay.
- continue;
- }
- // Other objects are expected to be context-dependent.
- PrintF("Found native-context-dependent object:\n");
- o.Print();
- o.map().Print();
- }
- is_independent = false;
- }
-
- return is_independent;
-}
-
bool Code::Inlines(SharedFunctionInfo sfi) {
// We can only check for inlining for optimized code.
DCHECK(is_optimized_code());
@@ -474,7 +426,7 @@ void print_pc(std::ostream& os, int pc) {
}
} // anonymous namespace
-void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
+void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
if (length() == 0) {
os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
return;