Diffstat (limited to 'chromium/v8/src/profiler')
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler-inl.h         |   2
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler.cc            |  12
-rw-r--r--  chromium/v8/src/profiler/cpu-profiler.h             |   2
-rw-r--r--  chromium/v8/src/profiler/heap-profiler.cc           |  11
-rw-r--r--  chromium/v8/src/profiler/heap-profiler.h            |   2
-rw-r--r--  chromium/v8/src/profiler/heap-snapshot-generator.cc | 141
-rw-r--r--  chromium/v8/src/profiler/profile-generator-inl.h    |   4
-rw-r--r--  chromium/v8/src/profiler/profile-generator.cc       | 133
-rw-r--r--  chromium/v8/src/profiler/profile-generator.h        |  17
-rw-r--r--  chromium/v8/src/profiler/profiler-listener.cc       |  77
-rw-r--r--  chromium/v8/src/profiler/profiler-listener.h        |  10
-rw-r--r--  chromium/v8/src/profiler/tick-sample.cc             |   7
12 files changed, 235 insertions, 183 deletions
diff --git a/chromium/v8/src/profiler/cpu-profiler-inl.h b/chromium/v8/src/profiler/cpu-profiler-inl.h
index 52291d63807..e0834e9eb2d 100644
--- a/chromium/v8/src/profiler/cpu-profiler-inl.h
+++ b/chromium/v8/src/profiler/cpu-profiler-inl.h
@@ -53,7 +53,7 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
if (builtin == Builtin::kGenericJSToWasmWrapper) {
// Make sure to add the generic js-to-wasm wrapper builtin, because that
// one is supposed to show up in profiles.
- entry = code_map->code_entries().Create(LogEventListener::BUILTIN_TAG,
+ entry = code_map->code_entries().Create(LogEventListener::CodeTag::kBuiltin,
Builtins::name(builtin));
code_map->AddCode(instruction_start, entry, instruction_size);
}
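This file's only change swaps the flat BUILTIN_TAG constant for the scoped LogEventListener::CodeTag::kBuiltin enumerator, part of a rename that runs through the whole patch. Below is a minimal sketch of the shape such a scoped tag enum might take; the enumerator names mirror the diff, but the underlying type and the Create signature are assumptions, not V8's actual definitions.

#include <cstdint>
#include <string>

// Hypothetical stand-in for LogEventListener::CodeTag; the real V8 type may
// differ in underlying type, members, and ordering.
enum class CodeTag : uint8_t {
  kFunction,
  kBuiltin,
  kCallback,
  kEval,
  kScript,
  kHandler,
  kBytecodeHandler,
  kRegExp,
  kStub,
  kNativeFunction,
  kNativeScript,
  kLength,  // sentinel; CodeTag::kLength also appears later in this patch
};

// A scoped enum cannot be passed where the old flat constants were accepted,
// so every call site (like code_entries().Create above) has to be updated in
// the same change -- which is what most hunks in this patch do.
struct CodeEntryFactory {
  // Assumed shape of a Create-style helper, for illustration only.
  void Create(CodeTag tag, const std::string& name) {
    (void)tag;
    (void)name;  // ... record (tag, name) ...
  }
};

int main() {
  CodeEntryFactory code_entries;
  code_entries.Create(CodeTag::kBuiltin, "ArrayPush");  // was BUILTIN_TAG
}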
diff --git a/chromium/v8/src/profiler/cpu-profiler.cc b/chromium/v8/src/profiler/cpu-profiler.cc
index 3f6d3079959..a79bcba9f22 100644
--- a/chromium/v8/src/profiler/cpu-profiler.cc
+++ b/chromium/v8/src/profiler/cpu-profiler.cc
@@ -398,7 +398,7 @@ void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
RuntimeCallCounter* counter = rcs->GetCounter(i);
DCHECK(counter->name());
- auto entry = code_entries_.Create(LogEventListener::FUNCTION_TAG,
+ auto entry = code_entries_.Create(LogEventListener::CodeTag::kFunction,
counter->name(), "native V8Runtime");
code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
}
@@ -412,7 +412,7 @@ void ProfilerCodeObserver::LogBuiltins() {
++builtin) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
- Code code = FromCodeT(builtins->code(builtin));
+ CodeT code = builtins->code(builtin);
rec->instruction_start = code.InstructionStart();
rec->instruction_size = code.InstructionSize();
rec->builtin = builtin;
@@ -568,7 +568,7 @@ void CpuProfiler::DisableLogging() {
code_observer_->ClearCodeMap();
}
-base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
+base::TimeDelta CpuProfiler::ComputeSamplingInterval() {
return profiles_->GetCommonSamplingInterval();
}
@@ -602,14 +602,14 @@ size_t CpuProfiler::GetEstimatedMemoryUsage() const {
CpuProfilingResult CpuProfiler::StartProfiling(
CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
- return StartProfiling(nullptr, options, std::move(delegate));
+ return StartProfiling(nullptr, std::move(options), std::move(delegate));
}
CpuProfilingResult CpuProfiler::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
CpuProfilingResult result =
- profiles_->StartProfiling(title, options, std::move(delegate));
+ profiles_->StartProfiling(title, std::move(options), std::move(delegate));
// TODO(nicodubus): Revisit logic for if we want to do anything different for
// kAlreadyStarted
@@ -626,7 +626,7 @@ CpuProfilingResult CpuProfiler::StartProfiling(
CpuProfilingResult CpuProfiler::StartProfiling(
String title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
- return StartProfiling(profiles_->GetName(title), options,
+ return StartProfiling(profiles_->GetName(title), std::move(options),
std::move(delegate));
}
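Several hunks above change options to std::move(options) when forwarding CpuProfilingOptions through the StartProfiling overload chain. Here is a small sketch of that forwarding pattern with a stand-in options type; the class and its fields are assumptions, and only the move-through-overloads shape comes from the diff.

#include <string>
#include <utility>
#include <vector>

// Stand-in for CpuProfilingOptions: cheap to move, potentially costly to copy.
struct Options {
  std::vector<std::string> filters;  // hypothetical field
  int max_samples = 0;
};

struct Result { int id; };

class Profiler {
 public:
  // Convenience overload without a title: forwards by move so any heap-backed
  // state is handed on instead of copied at each hop.
  Result Start(Options options) { return Start(nullptr, std::move(options)); }

  Result Start(const char* title, Options options) {
    return StartInternal(title, std::move(options));
  }

 private:
  Result StartInternal(const char* title, Options options) {
    stored_ = std::move(options);  // the final consumer takes ownership
    (void)title;
    return {42};
  }
  Options stored_;
};

int main() {
  Profiler p;
  Options o;
  o.filters = {"foo", "bar"};
  p.Start(std::move(o));  // callers can move in as well
}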
diff --git a/chromium/v8/src/profiler/cpu-profiler.h b/chromium/v8/src/profiler/cpu-profiler.h
index ae5e2d9edcc..fd1aae58950 100644
--- a/chromium/v8/src/profiler/cpu-profiler.h
+++ b/chromium/v8/src/profiler/cpu-profiler.h
@@ -386,7 +386,7 @@ class V8_EXPORT_PRIVATE CpuProfiler {
void DisableLogging();
// Computes a sampling interval sufficient to accomodate attached profiles.
- base::TimeDelta ComputeSamplingInterval() const;
+ base::TimeDelta ComputeSamplingInterval();
// Dynamically updates the sampler to use a sampling interval sufficient for
// child profiles.
void AdjustSamplingInterval();
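ComputeSamplingInterval() loses its const qualifier here, mirroring CpuProfilesCollection::GetCommonSamplingInterval() further down, which now locks current_profiles_mutex_; locking a non-mutable mutex member is a non-const operation, so either the accessor drops const (as this patch does) or the mutex is declared mutable. A small sketch of the two options with standard-library stand-ins, not V8's base::RecursiveMutex:

#include <mutex>
#include <vector>

class CollectionDroppedConst {
 public:
  // Mirrors the patch: the accessor is non-const because it locks a
  // non-mutable member.
  int Sum() {
    std::lock_guard<std::recursive_mutex> guard(mutex_);
    int total = 0;
    for (int v : values_) total += v;
    return total;
  }

 private:
  std::recursive_mutex mutex_;
  std::vector<int> values_;
};

class CollectionMutableMutex {
 public:
  // Alternative: keep the accessor const and mark the mutex mutable.
  int Sum() const {
    std::lock_guard<std::recursive_mutex> guard(mutex_);
    int total = 0;
    for (int v : values_) total += v;
    return total;
  }

 private:
  mutable std::recursive_mutex mutex_;
  std::vector<int> values_;
};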
diff --git a/chromium/v8/src/profiler/heap-profiler.cc b/chromium/v8/src/profiler/heap-profiler.cc
index dfe37b99111..7a1317ab4f0 100644
--- a/chromium/v8/src/profiler/heap-profiler.cc
+++ b/chromium/v8/src/profiler/heap-profiler.cc
@@ -102,6 +102,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
ids_->RemoveDeadEntries();
is_tracking_object_moves_ = true;
+ heap()->isolate()->UpdateLogObjectRelocation();
is_taking_snapshot_ = false;
heap()->isolate()->debug()->feature_tracker()->Track(
@@ -140,6 +141,7 @@ v8::AllocationProfile* HeapProfiler::GetAllocationProfile() {
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
+ heap()->isolate()->UpdateLogObjectRelocation();
DCHECK(!allocation_tracker_);
if (track_allocations) {
allocation_tracker_.reset(new AllocationTracker(ids_.get(), names_.get()));
@@ -231,7 +233,10 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
void HeapProfiler::ClearHeapObjectMap() {
ids_.reset(new HeapObjectsMap(heap()));
- if (!allocation_tracker_) is_tracking_object_moves_ = false;
+ if (!allocation_tracker_) {
+ is_tracking_object_moves_ = false;
+ heap()->isolate()->UpdateLogObjectRelocation();
+ }
}
@@ -241,7 +246,7 @@ Isolate* HeapProfiler::isolate() const { return heap()->isolate(); }
void HeapProfiler::QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
- PersistentValueVector<v8::Object>* objects) {
+ std::vector<v8::Global<v8::Object>>* objects) {
{
HandleScope handle_scope(isolate());
std::vector<Handle<JSTypedArray>> on_heap_typed_arrays;
@@ -279,7 +284,7 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
v8::Local<v8::Object> v8_obj(
Utils::ToLocal(handle(JSObject::cast(heap_obj), isolate())));
if (!predicate->Filter(v8_obj)) continue;
- objects->Append(v8_obj);
+ objects->emplace_back(reinterpret_cast<v8::Isolate*>(isolate()), v8_obj);
}
}
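The QueryObjects change replaces the deprecated v8::PersistentValueVector<v8::Object> output parameter with a plain std::vector<v8::Global<v8::Object>>. On the embedder side the collected handles end up being consumed roughly as sketched below; this is written against the public V8 API, but the Inspect step and isolate setup are omitted or assumed.

#include <vector>
#include "v8.h"  // public embedder API; include paths vary by build

// Sketch: consuming the handles filled in by HeapProfiler::QueryObjects after
// the PersistentValueVector -> std::vector<v8::Global<...>> change.
void InspectMatches(v8::Isolate* isolate,
                    std::vector<v8::Global<v8::Object>>* objects) {
  v8::HandleScope scope(isolate);
  for (v8::Global<v8::Object>& global : *objects) {
    v8::Local<v8::Object> local = global.Get(isolate);
    // ... inspect `local` here ...
    (void)local;
  }
  // v8::Global is move-only, so the vector can be cleared or moved, but its
  // elements cannot be copied.
  objects->clear();
}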
diff --git a/chromium/v8/src/profiler/heap-profiler.h b/chromium/v8/src/profiler/heap-profiler.h
index 0b8fe975162..6a919dbddff 100644
--- a/chromium/v8/src/profiler/heap-profiler.h
+++ b/chromium/v8/src/profiler/heap-profiler.h
@@ -90,7 +90,7 @@ class HeapProfiler : public HeapObjectAllocationTracker {
void QueryObjects(Handle<Context> context,
debug::QueryObjectPredicate* predicate,
- v8::PersistentValueVector<v8::Object>* objects);
+ std::vector<v8::Global<v8::Object>>* objects);
private:
void MaybeClearStringsStorage();
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc
index 9d71f8df837..64a27a68340 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.cc
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc
@@ -801,59 +801,73 @@ void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
- if (object.IsJSFunction()) {
- JSFunction func = JSFunction::cast(object);
- SharedFunctionInfo shared = func.shared();
- const char* name = names_->GetName(shared.Name());
- return AddEntry(object, HeapEntry::kClosure, name);
- } else if (object.IsJSBoundFunction()) {
- return AddEntry(object, HeapEntry::kClosure, "native_bind");
- } else if (object.IsJSRegExp()) {
- JSRegExp re = JSRegExp::cast(object);
- return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.source()));
- } else if (object.IsJSObject()) {
+ PtrComprCageBase cage_base(isolate());
+ InstanceType instance_type = object.map(cage_base).instance_type();
+ if (InstanceTypeChecker::IsJSObject(instance_type)) {
+ if (InstanceTypeChecker::IsJSFunction(instance_type)) {
+ JSFunction func = JSFunction::cast(object);
+ SharedFunctionInfo shared = func.shared();
+ const char* name = names_->GetName(shared.Name());
+ return AddEntry(object, HeapEntry::kClosure, name);
+
+ } else if (InstanceTypeChecker::IsJSBoundFunction(instance_type)) {
+ return AddEntry(object, HeapEntry::kClosure, "native_bind");
+ }
+ if (InstanceTypeChecker::IsJSRegExp(instance_type)) {
+ JSRegExp re = JSRegExp::cast(object);
+ return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.source()));
+ }
// TODO(v8:12674) Fix and run full gcmole.
DisableGCMole no_gcmole;
const char* name = names_->GetName(
GetConstructorName(heap_->isolate(), JSObject::cast(object)));
- if (object.IsJSGlobalObject()) {
+ if (InstanceTypeChecker::IsJSGlobalObject(instance_type)) {
auto it = global_object_tag_map_.find(JSGlobalObject::cast(object));
if (it != global_object_tag_map_.end()) {
name = names_->GetFormatted("%s / %s", name, it->second);
}
}
return AddEntry(object, HeapEntry::kObject, name);
- } else if (object.IsString()) {
+
+ } else if (InstanceTypeChecker::IsString(instance_type)) {
String string = String::cast(object);
- if (string.IsConsString()) {
+ if (string.IsConsString(cage_base)) {
return AddEntry(object, HeapEntry::kConsString, "(concatenated string)");
- } else if (string.IsSlicedString()) {
+ } else if (string.IsSlicedString(cage_base)) {
return AddEntry(object, HeapEntry::kSlicedString, "(sliced string)");
} else {
return AddEntry(object, HeapEntry::kString,
names_->GetName(String::cast(object)));
}
- } else if (object.IsSymbol()) {
+ } else if (InstanceTypeChecker::IsSymbol(instance_type)) {
if (Symbol::cast(object).is_private())
return AddEntry(object, HeapEntry::kHidden, "private symbol");
else
return AddEntry(object, HeapEntry::kSymbol, "symbol");
- } else if (object.IsBigInt()) {
+
+ } else if (InstanceTypeChecker::IsBigInt(instance_type)) {
return AddEntry(object, HeapEntry::kBigInt, "bigint");
- } else if (object.IsCode()) {
+
+ } else if (InstanceTypeChecker::IsCode(instance_type) ||
+ InstanceTypeChecker::IsCodeDataContainer(instance_type)) {
return AddEntry(object, HeapEntry::kCode, "");
- } else if (object.IsSharedFunctionInfo()) {
+
+ } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
String name = SharedFunctionInfo::cast(object).Name();
return AddEntry(object, HeapEntry::kCode, names_->GetName(name));
- } else if (object.IsScript()) {
+
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
Object name = Script::cast(object).name();
return AddEntry(object, HeapEntry::kCode,
name.IsString() ? names_->GetName(String::cast(name)) : "");
- } else if (object.IsNativeContext()) {
+
+ } else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
- } else if (object.IsContext()) {
+
+ } else if (InstanceTypeChecker::IsContext(instance_type)) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
- } else if (object.IsHeapNumber()) {
+
+ } else if (InstanceTypeChecker::IsHeapNumber(instance_type)) {
return AddEntry(object, HeapEntry::kHeapNumber, "heap number");
}
return AddEntry(object, GetSystemEntryType(object),
@@ -984,6 +998,24 @@ uint32_t V8HeapExplorer::EstimateObjectsCount() {
return objects_count;
}
+#ifdef V8_TARGET_BIG_ENDIAN
+namespace {
+int AdjustEmbedderFieldIndex(HeapObject heap_obj, int field_index) {
+ Map map = heap_obj.map();
+ if (JSObject::MayHaveEmbedderFields(map)) {
+ int emb_start_index = (JSObject::GetEmbedderFieldsStartOffset(map) +
+ EmbedderDataSlot::kTaggedPayloadOffset) /
+ kTaggedSize;
+ int emb_field_count = JSObject::GetEmbedderFieldCount(map);
+ int emb_end_index = emb_start_index + emb_field_count;
+ if (base::IsInRange(field_index, emb_start_index, emb_end_index)) {
+ return -EmbedderDataSlot::kTaggedPayloadOffset / kTaggedSize;
+ }
+ }
+ return 0;
+}
+} // namespace
+#endif // V8_TARGET_BIG_ENDIAN
class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
public:
IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject parent_obj,
@@ -1038,6 +1070,10 @@ class IndexedReferencesExtractor : public ObjectVisitorWithCageBases {
V8_INLINE void VisitSlotImpl(PtrComprCageBase cage_base, TSlot slot) {
int field_index =
static_cast<int>(MaybeObjectSlot(slot.address()) - parent_start_);
+#ifdef V8_TARGET_BIG_ENDIAN
+ field_index += AdjustEmbedderFieldIndex(parent_obj_, field_index);
+#endif
+ DCHECK_GE(field_index, 0);
if (generator_->visited_fields_[field_index]) {
generator_->visited_fields_[field_index] = false;
} else {
@@ -1439,13 +1475,18 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapEntry* entry, SharedFunctionInfo shared) {
std::unique_ptr<char[]> name = shared.DebugNameCStr();
+ CodeT code = shared.GetCode();
+ // Don't try to get the Code object from Code-less embedded builtin.
+ HeapObject maybe_code_obj =
+ V8_REMOVE_BUILTINS_CODE_OBJECTS && code.is_off_heap_trampoline()
+ ? HeapObject::cast(code)
+ : FromCodeT(code);
if (name[0] != '\0') {
- TagObject(FromCodeT(shared.GetCode()),
+ TagObject(maybe_code_obj,
names_->GetFormatted("(code for %s)", name.get()));
} else {
- TagObject(FromCodeT(shared.GetCode()),
- names_->GetFormatted("(%s code)",
- CodeKindToString(shared.GetCode().kind())));
+ TagObject(maybe_code_obj,
+ names_->GetFormatted("(%s code)", CodeKindToString(code.kind())));
}
Object name_or_scope_info = shared.name_or_scope_info(kAcquireLoad);
@@ -1484,10 +1525,6 @@ void V8HeapExplorer::ExtractAccessorInfoReferences(HeapEntry* entry,
AccessorInfo accessor_info) {
SetInternalReference(entry, "name", accessor_info.name(),
AccessorInfo::kNameOffset);
- SetInternalReference(entry, "getter", accessor_info.getter(),
- AccessorInfo::kGetterOffset);
- SetInternalReference(entry, "setter", accessor_info.setter(),
- AccessorInfo::kSetterOffset);
SetInternalReference(entry, "data", accessor_info.data(),
AccessorInfo::kDataOffset);
}
@@ -1515,7 +1552,12 @@ void V8HeapExplorer::ExtractWeakCellReferences(HeapEntry* entry,
}
void V8HeapExplorer::TagBuiltinCodeObject(CodeT code, const char* name) {
- TagObject(FromCodeT(code), names_->GetFormatted("(%s builtin)", name));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ TagObject(code, names_->GetFormatted("(%s builtin handle)", name));
+ }
+ if (!V8_REMOVE_BUILTINS_CODE_OBJECTS || !code.is_off_heap_trampoline()) {
+ TagObject(FromCodeT(code), names_->GetFormatted("(%s builtin)", name));
+ }
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
@@ -1970,24 +2012,31 @@ class RootsReferencesExtractor : public RootVisitor {
// Must match behavior in
// MarkCompactCollector::RootMarkingVisitor::VisitRunningCode, which treats
// deoptimization literals in running code as stack roots.
- Code code = Code::cast(*p);
- if (code.kind() != CodeKind::BASELINE) {
- DeoptimizationData deopt_data =
- DeoptimizationData::cast(code.deoptimization_data());
- if (deopt_data.length() > 0) {
- DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
- int literals_length = literals.length();
- for (int i = 0; i < literals_length; ++i) {
- MaybeObject maybe_literal = literals.Get(i);
- HeapObject heap_literal;
- if (maybe_literal.GetHeapObject(&heap_literal)) {
- VisitRootPointer(Root::kStackRoots, nullptr,
- FullObjectSlot(&heap_literal));
+ HeapObject value = HeapObject::cast(*p);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL && !IsCodeSpaceObject(value)) {
+ // When external code space is enabled, the slot might contain a CodeT
+ // object representing an embedded builtin, which doesn't require
+ // additional processing.
+ DCHECK(CodeT::cast(value).is_off_heap_trampoline());
+ } else {
+ Code code = Code::cast(value);
+ if (code.kind() != CodeKind::BASELINE) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ if (deopt_data.length() > 0) {
+ DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
+ int literals_length = literals.length();
+ for (int i = 0; i < literals_length; ++i) {
+ MaybeObject maybe_literal = literals.Get(i);
+ HeapObject heap_literal;
+ if (maybe_literal.GetHeapObject(&heap_literal)) {
+ VisitRootPointer(Root::kStackRoots, nullptr,
+ FullObjectSlot(&heap_literal));
+ }
}
}
}
}
-
// Finally visit the Code itself.
VisitRootPointer(Root::kStackRoots, nullptr, p);
}
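The rewritten V8HeapExplorer::AddEntry reads the object's instance type once through the pointer-compression cage base and then dispatches with InstanceTypeChecker predicates, instead of calling object.IsJSFunction(), object.IsString(), and so on, each of which reloads the map. A generic sketch of that "classify once, branch on the cached type" shape follows; the types and names are illustrative, not V8's.

#include <cstdint>
#include <string>

// Illustrative stand-ins; V8's InstanceType/Map machinery is far richer.
enum class InstanceType : uint16_t {
  kFunction, kRegExp, kPlainObject, kString, kOther
};

struct HeapObject {
  InstanceType instance_type;  // in V8 this lives on the object's Map
};

// Before: each predicate re-reads and inspects the type.
inline bool IsFunction(const HeapObject& o) {
  return o.instance_type == InstanceType::kFunction;
}
inline bool IsRegExp(const HeapObject& o) {
  return o.instance_type == InstanceType::kRegExp;
}

std::string ClassifyRepeatedChecks(const HeapObject& o) {
  if (IsFunction(o)) return "closure";
  if (IsRegExp(o)) return "regexp";
  return "object";
}

// After: fetch the type once, then branch on the cached value, mirroring the
// InstanceTypeChecker::IsXxx(instance_type) calls in the patch.
std::string ClassifyCachedType(const HeapObject& o) {
  const InstanceType type = o.instance_type;  // single load
  switch (type) {
    case InstanceType::kFunction: return "closure";
    case InstanceType::kRegExp:   return "regexp";
    default:                      return "object";
  }
}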
diff --git a/chromium/v8/src/profiler/profile-generator-inl.h b/chromium/v8/src/profiler/profile-generator-inl.h
index 2383976c4a7..ecfdf95635b 100644
--- a/chromium/v8/src/profiler/profile-generator-inl.h
+++ b/chromium/v8/src/profiler/profile-generator-inl.h
@@ -12,12 +12,12 @@
namespace v8 {
namespace internal {
-CodeEntry::CodeEntry(LogEventListener::LogEventsAndTags tag, const char* name,
+CodeEntry::CodeEntry(LogEventListener::CodeTag tag, const char* name,
const char* resource_name, int line_number,
int column_number,
std::unique_ptr<SourcePositionTable> line_info,
bool is_shared_cross_origin, CodeType code_type)
- : bit_field_(TagField::encode(tag) |
+ : bit_field_(CodeTagField::encode(tag) |
BuiltinField::encode(Builtin::kIllegal) |
CodeTypeField::encode(code_type) |
SharedCrossOriginField::encode(is_shared_cross_origin)),
diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc
index bb20eefbe35..1f39ec26d15 100644
--- a/chromium/v8/src/profiler/profile-generator.cc
+++ b/chromium/v8/src/profiler/profile-generator.cc
@@ -91,7 +91,7 @@ const char* const CodeEntry::kRootEntryName = "(root)";
// static
CodeEntry* CodeEntry::program_entry() {
static base::LeakyObject<CodeEntry> kProgramEntry(
- LogEventListener::FUNCTION_TAG, CodeEntry::kProgramEntryName,
+ LogEventListener::CodeTag::kFunction, CodeEntry::kProgramEntryName,
CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
CodeEntry::CodeType::OTHER);
@@ -101,7 +101,7 @@ CodeEntry* CodeEntry::program_entry() {
// static
CodeEntry* CodeEntry::idle_entry() {
static base::LeakyObject<CodeEntry> kIdleEntry(
- LogEventListener::FUNCTION_TAG, CodeEntry::kIdleEntryName,
+ LogEventListener::CodeTag::kFunction, CodeEntry::kIdleEntryName,
CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
CodeEntry::CodeType::OTHER);
@@ -111,8 +111,9 @@ CodeEntry* CodeEntry::idle_entry() {
// static
CodeEntry* CodeEntry::gc_entry() {
static base::LeakyObject<CodeEntry> kGcEntry(
- LogEventListener::BUILTIN_TAG, CodeEntry::kGarbageCollectorEntryName,
- CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
+ LogEventListener::CodeTag::kBuiltin,
+ CodeEntry::kGarbageCollectorEntryName, CodeEntry::kEmptyResourceName,
+ v8::CpuProfileNode::kNoLineNumberInfo,
v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
CodeEntry::CodeType::OTHER);
return kGcEntry.get();
@@ -121,7 +122,7 @@ CodeEntry* CodeEntry::gc_entry() {
// static
CodeEntry* CodeEntry::unresolved_entry() {
static base::LeakyObject<CodeEntry> kUnresolvedEntry(
- LogEventListener::FUNCTION_TAG, CodeEntry::kUnresolvedFunctionName,
+ LogEventListener::CodeTag::kFunction, CodeEntry::kUnresolvedFunctionName,
CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
CodeEntry::CodeType::OTHER);
@@ -131,7 +132,7 @@ CodeEntry* CodeEntry::unresolved_entry() {
// static
CodeEntry* CodeEntry::root_entry() {
static base::LeakyObject<CodeEntry> kRootEntry(
- LogEventListener::FUNCTION_TAG, CodeEntry::kRootEntryName,
+ LogEventListener::CodeTag::kFunction, CodeEntry::kRootEntryName,
CodeEntry::kEmptyResourceName, v8::CpuProfileNode::kNoLineNumberInfo,
v8::CpuProfileNode::kNoColumnNumberInfo, nullptr, false,
CodeEntry::CodeType::OTHER);
@@ -163,7 +164,8 @@ bool CodeEntry::IsSameFunctionAs(const CodeEntry* entry) const {
}
void CodeEntry::SetBuiltinId(Builtin id) {
- bit_field_ = TagField::update(bit_field_, LogEventListener::BUILTIN_TAG);
+ bit_field_ =
+ CodeTagField::update(bit_field_, LogEventListener::CodeTag::kBuiltin);
bit_field_ = BuiltinField::update(bit_field_, id);
}
@@ -339,34 +341,26 @@ CpuProfileNode::SourceType ProfileNode::source_type() const {
return CpuProfileNode::kUnresolved;
// Otherwise, resolve based on logger tag.
- switch (entry_->tag()) {
- case LogEventListener::EVAL_TAG:
- case LogEventListener::SCRIPT_TAG:
- case LogEventListener::LAZY_COMPILE_TAG:
- case LogEventListener::FUNCTION_TAG:
+ switch (entry_->code_tag()) {
+ case LogEventListener::CodeTag::kEval:
+ case LogEventListener::CodeTag::kScript:
+ case LogEventListener::CodeTag::kFunction:
return CpuProfileNode::kScript;
- case LogEventListener::BUILTIN_TAG:
- case LogEventListener::HANDLER_TAG:
- case LogEventListener::BYTECODE_HANDLER_TAG:
- case LogEventListener::NATIVE_FUNCTION_TAG:
- case LogEventListener::NATIVE_SCRIPT_TAG:
- case LogEventListener::NATIVE_LAZY_COMPILE_TAG:
+ case LogEventListener::CodeTag::kBuiltin:
+ case LogEventListener::CodeTag::kHandler:
+ case LogEventListener::CodeTag::kBytecodeHandler:
+ case LogEventListener::CodeTag::kNativeFunction:
+ case LogEventListener::CodeTag::kNativeScript:
return CpuProfileNode::kBuiltin;
- case LogEventListener::CALLBACK_TAG:
+ case LogEventListener::CodeTag::kCallback:
return CpuProfileNode::kCallback;
- case LogEventListener::REG_EXP_TAG:
- case LogEventListener::STUB_TAG:
- case LogEventListener::CODE_CREATION_EVENT:
- case LogEventListener::CODE_DISABLE_OPT_EVENT:
- case LogEventListener::CODE_MOVE_EVENT:
- case LogEventListener::CODE_DELETE_EVENT:
- case LogEventListener::CODE_MOVING_GC:
- case LogEventListener::SHARED_FUNC_MOVE_EVENT:
- case LogEventListener::SNAPSHOT_CODE_NAME_EVENT:
- case LogEventListener::TICK_EVENT:
- case LogEventListener::NUMBER_OF_LOG_EVENTS:
+ case LogEventListener::CodeTag::kRegExp:
+ case LogEventListener::CodeTag::kStub:
+ case LogEventListener::CodeTag::kLength:
return CpuProfileNode::kInternal;
}
+ return CpuProfileNode::kInternal;
+ UNREACHABLE();
}
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
@@ -576,7 +570,7 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, ProfilerId id, const char* title,
CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate)
: title_(title),
- options_(options),
+ options_(std::move(options)),
delegate_(std::move(delegate)),
start_time_(base::TimeTicks::Now()),
top_down_(profiler->isolate(), profiler->code_entries()),
@@ -631,17 +625,16 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
ProfileNode* top_frame_node =
top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());
+ bool is_buffer_full =
+ options_.max_samples() != CpuProfilingOptions::kNoSampleLimit &&
+ samples_.size() >= options_.max_samples();
bool should_record_sample =
- !timestamp.IsNull() && timestamp >= start_time_ &&
- (options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
- samples_.size() < options_.max_samples());
+ !timestamp.IsNull() && timestamp >= start_time_ && !is_buffer_full;
if (should_record_sample) {
samples_.push_back(
{top_frame_node, timestamp, src_line, state_tag, embedder_state_tag});
- }
-
- if (!should_record_sample && delegate_ != nullptr) {
+ } else if (is_buffer_full && delegate_ != nullptr) {
const auto task_runner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
reinterpret_cast<v8::Isolate*>(profiler_->isolate()));
@@ -894,7 +887,7 @@ size_t CodeMap::GetEstimatedMemoryUsage() const {
}
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
- : profiler_(nullptr), current_profiles_semaphore_(1), isolate_(isolate) {
+ : profiler_(nullptr), current_profiles_mutex_(), isolate_(isolate) {
USE(isolate_);
}
@@ -906,16 +899,15 @@ CpuProfilingResult CpuProfilesCollection::StartProfilingForTesting(
CpuProfilingResult CpuProfilesCollection::StartProfiling(
const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
- return StartProfiling(++last_id_, title, options, std::move(delegate));
+ return StartProfiling(++last_id_, title, std::move(options),
+ std::move(delegate));
}
CpuProfilingResult CpuProfilesCollection::StartProfiling(
ProfilerId id, const char* title, CpuProfilingOptions options,
std::unique_ptr<DiscardedSamplesDelegate> delegate) {
- current_profiles_semaphore_.Wait();
-
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
- current_profiles_semaphore_.Signal();
return {
0,
CpuProfilingStatus::kErrorTooManyProfilers,
@@ -927,7 +919,6 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
strcmp(profile->title(), title) == 0) ||
profile->id() == id) {
// Ignore attempts to start profile with the same title or id
- current_profiles_semaphore_.Signal();
// ... though return kAlreadyStarted to force it collect a sample.
return {
profile->id(),
@@ -936,10 +927,9 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
}
}
- CpuProfile* profile =
- new CpuProfile(profiler_, id, title, options, std::move(delegate));
+ CpuProfile* profile = new CpuProfile(profiler_, id, title, std::move(options),
+ std::move(delegate));
current_profiles_.emplace_back(profile);
- current_profiles_semaphore_.Signal();
return {
profile->id(),
@@ -948,7 +938,7 @@ CpuProfilingResult CpuProfilesCollection::StartProfiling(
}
CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
- current_profiles_semaphore_.Wait();
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
CpuProfile* profile = nullptr;
auto it = std::find_if(
@@ -962,37 +952,27 @@ CpuProfile* CpuProfilesCollection::StopProfiling(ProfilerId id) {
// Convert reverse iterator to matching forward iterator.
current_profiles_.erase(--(it.base()));
}
- current_profiles_semaphore_.Signal();
return profile;
}
CpuProfile* CpuProfilesCollection::Lookup(const char* title) {
- // Called from VM thread, and only it can mutate the list,
- // so no locking is needed here.
- DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
- if (title == nullptr) {
- return nullptr;
- }
+ if (title == nullptr) return nullptr;
// http://crbug/51594, edge case console.profile may provide an empty title
// and must not crash
const bool empty_title = title[0] == '\0';
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
auto it = std::find_if(
current_profiles_.rbegin(), current_profiles_.rend(),
[&](const std::unique_ptr<CpuProfile>& p) {
return (empty_title ||
(p->title() != nullptr && strcmp(p->title(), title) == 0));
});
- if (it != current_profiles_.rend()) {
- return it->get();
- }
-
+ if (it != current_profiles_.rend()) return it->get();
return nullptr;
}
bool CpuProfilesCollection::IsLastProfileLeft(ProfilerId id) {
- // Called from VM thread, and only it can mutate the list,
- // so no locking is needed here.
- DCHECK_EQ(ThreadId::Current(), isolate_->thread_id());
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
if (current_profiles_.size() != 1) return false;
return id == current_profiles_[0]->id();
}
@@ -1017,7 +997,7 @@ int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
} // namespace
-base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
+base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() {
DCHECK(profiler_);
int64_t base_sampling_interval_us =
@@ -1025,16 +1005,19 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
if (base_sampling_interval_us == 0) return base::TimeDelta();
int64_t interval_us = 0;
- for (const auto& profile : current_profiles_) {
- // Snap the profile's requested sampling interval to the next multiple of
- // the base sampling interval.
- int64_t profile_interval_us =
- std::max<int64_t>(
- (profile->sampling_interval_us() + base_sampling_interval_us - 1) /
- base_sampling_interval_us,
- 1) *
- base_sampling_interval_us;
- interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
+ {
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
+ for (const auto& profile : current_profiles_) {
+ // Snap the profile's requested sampling interval to the next multiple of
+ // the base sampling interval.
+ int64_t profile_interval_us =
+ std::max<int64_t>((profile->sampling_interval_us() +
+ base_sampling_interval_us - 1) /
+ base_sampling_interval_us,
+ 1) *
+ base_sampling_interval_us;
+ interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
+ }
}
return base::TimeDelta::FromMicroseconds(interval_us);
}
@@ -1047,8 +1030,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
// As starting / stopping profiles is rare relatively to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
- current_profiles_semaphore_.Wait();
const ProfileStackTrace empty_path;
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
ContextFilter& context_filter = profile->context_filter();
// If the context filter check failed, omit the contents of the stack.
@@ -1067,16 +1050,14 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
accepts_embedder_context ? embedder_state_tag
: EmbedderStateTag::EMPTY);
}
- current_profiles_semaphore_.Signal();
}
void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
Address from, Address to) {
- current_profiles_semaphore_.Wait();
+ base::RecursiveMutexGuard profiles_guard{&current_profiles_mutex_};
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
profile->context_filter().OnMoveEvent(from, to);
}
- current_profiles_semaphore_.Signal();
}
} // namespace internal
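GetCommonSamplingInterval() (now guarded by the new recursive mutex) snaps each profile's requested interval up to a multiple of the profiler's base interval and then folds the results with a GCD, so that one sampler rate can serve every attached profile. Here is a standalone sketch of that arithmetic with the V8 plumbing stripped away; the function names are illustrative.

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Round `requested_us` up to the next positive multiple of `base_us`.
int64_t SnapToBase(int64_t requested_us, int64_t base_us) {
  int64_t multiples =
      std::max<int64_t>((requested_us + base_us - 1) / base_us, 1);
  return multiples * base_us;
}

// Mirrors the loop in CpuProfilesCollection::GetCommonSamplingInterval():
// fold the snapped intervals with a GCD. The result divides every profile's
// snapped interval and is itself a multiple of the base interval; with no
// profiles (or a zero base interval) it stays 0.
int64_t CommonSamplingIntervalUs(const std::vector<int64_t>& requested_us,
                                 int64_t base_us) {
  if (base_us == 0) return 0;
  int64_t interval_us = 0;
  for (int64_t r : requested_us) {
    interval_us = std::gcd(interval_us, SnapToBase(r, base_us));
  }
  return interval_us;
}

// Example: base 100us, profiles asking for 250us and 1000us -> snapped to
// 300us and 1000us -> GCD 100us, so sampling at 100us serves both.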
diff --git a/chromium/v8/src/profiler/profile-generator.h b/chromium/v8/src/profiler/profile-generator.h
index af956f9ebd4..6228f2bba4e 100644
--- a/chromium/v8/src/profiler/profile-generator.h
+++ b/chromium/v8/src/profiler/profile-generator.h
@@ -65,7 +65,7 @@ class CodeEntry {
// CodeEntry may reference strings (|name|, |resource_name|) managed by a
// StringsStorage instance. These must be freed via ReleaseStrings.
- inline CodeEntry(LogEventListener::LogEventsAndTags tag, const char* name,
+ inline CodeEntry(LogEventListener::CodeTag tag, const char* name,
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
@@ -164,8 +164,12 @@ class CodeEntry {
const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
int pc_offset) const;
- LogEventListener::LogEventsAndTags tag() const {
- return TagField::decode(bit_field_);
+ LogEventListener::Event event() const {
+ return EventField::decode(bit_field_);
+ }
+
+ LogEventListener::CodeTag code_tag() const {
+ return CodeTagField::decode(bit_field_);
}
V8_EXPORT_PRIVATE static const char* const kEmptyResourceName;
@@ -227,7 +231,8 @@ class CodeEntry {
return ref_count_;
}
- using TagField = base::BitField<LogEventListener::LogEventsAndTags, 0, 8>;
+ using EventField = base::BitField<LogEventListener::Event, 0, 4>;
+ using CodeTagField = base::BitField<LogEventListener::CodeTag, 0, 4>;
using BuiltinField = base::BitField<Builtin, 8, 20>;
static_assert(Builtins::kBuiltinCount <= BuiltinField::kNumValues,
"builtin_count exceeds size of bitfield");
@@ -559,7 +564,7 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
// Finds a common sampling interval dividing each CpuProfile's interval,
// rounded up to the nearest multiple of the CpuProfiler's sampling interval.
// Returns 0 if no profiles are attached.
- base::TimeDelta GetCommonSamplingInterval() const;
+ base::TimeDelta GetCommonSamplingInterval();
// Called from profile generator thread.
void AddPathToCurrentProfiles(
@@ -586,7 +591,7 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
// Accessed by VM thread and profile generator thread.
std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
- base::Semaphore current_profiles_semaphore_;
+ base::RecursiveMutex current_profiles_mutex_;
static std::atomic<ProfilerId> last_id_;
Isolate* isolate_;
};
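The header replaces the single 8-bit TagField with two 4-bit fields, EventField and CodeTagField, both declared over bits 0-3 of bit_field_ (a CodeEntry stores one or the other there), while BuiltinField keeps 20 bits starting at bit 8. Below is a toy encode/decode/update helper in the spirit of base::BitField; it is a simplified stand-in, not V8's implementation, and the enums are illustrative.

#include <cstdint>

// Minimal stand-in for v8::base::BitField<T, kShift, kSize>; the real class
// carries more checks and helpers.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t packed, T value) {
    return (packed & ~kMask) | encode(value);
  }
};

enum class CodeTag : uint8_t { kFunction, kBuiltin, kCallback };  // illustrative
enum class Builtin : uint32_t { kIllegal = 0, kArrayPush = 1 };   // illustrative

using CodeTagField = BitField<CodeTag, 0, 4>;
using BuiltinField = BitField<Builtin, 8, 20>;

int main() {
  // Pack a tag and a builtin id into one word, roughly as CodeEntry's
  // constructor and SetBuiltinId do in this patch.
  uint32_t bits = CodeTagField::encode(CodeTag::kFunction) |
                  BuiltinField::encode(Builtin::kIllegal);
  bits = CodeTagField::update(bits, CodeTag::kBuiltin);  // SetBuiltinId path
  bits = BuiltinField::update(bits, Builtin::kArrayPush);
  return CodeTagField::decode(bits) == CodeTag::kBuiltin ? 0 : 1;
}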
diff --git a/chromium/v8/src/profiler/profiler-listener.cc b/chromium/v8/src/profiler/profiler-listener.cc
index 80138667fc3..458aac21f82 100644
--- a/chromium/v8/src/profiler/profiler-listener.cc
+++ b/chromium/v8/src/profiler/profiler-listener.cc
@@ -40,51 +40,52 @@ ProfilerListener::ProfilerListener(Isolate* isolate,
ProfilerListener::~ProfilerListener() = default;
-void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
- Handle<AbstractCode> code,
+void ProfilerListener::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
const char* name) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = code->InstructionStart(cage_base);
rec->entry =
code_entries_.Create(tag, GetName(name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- rec->instruction_size = code->InstructionSize();
+ rec->instruction_size = code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
- Handle<AbstractCode> code,
+void ProfilerListener::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<Name> name) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = code->InstructionStart(cage_base);
rec->entry =
code_entries_.Create(tag, GetName(*name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- rec->instruction_size = code->InstructionSize();
+ rec->instruction_size = code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
- Handle<AbstractCode> code,
+void ProfilerListener::CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = code->InstructionStart(cage_base);
rec->entry =
code_entries_.Create(tag, GetName(shared->DebugNameCStr().get()),
GetName(InferScriptName(*script_name, *shared)),
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- DCHECK_IMPLIES(code->IsCode(), code->kind() == CodeKind::BASELINE);
+ DCHECK_IMPLIES(code->IsCode(cage_base),
+ code->kind(cage_base) == CodeKind::BASELINE);
rec->entry->FillFunctionInfo(*shared);
- rec->instruction_size = code->InstructionSize();
+ rec->instruction_size = code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -106,35 +107,37 @@ CodeEntry* GetOrInsertCachedEntry(
} // namespace
-void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
+void ProfilerListener::CodeCreateEvent(CodeTag tag,
Handle<AbstractCode> abstract_code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line,
int column) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = abstract_code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = abstract_code->InstructionStart(cage_base);
std::unique_ptr<SourcePositionTable> line_table;
std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks;
std::unordered_set<CodeEntry*, CodeEntry::Hasher, CodeEntry::Equals>
cached_inline_entries;
bool is_shared_cross_origin = false;
- if (shared->script().IsScript()) {
- Handle<Script> script = handle(Script::cast(shared->script()), isolate_);
+ if (shared->script(cage_base).IsScript(cage_base)) {
+ Handle<Script> script =
+ handle(Script::cast(shared->script(cage_base)), isolate_);
line_table.reset(new SourcePositionTable());
is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin();
- bool is_baseline = abstract_code->kind() == CodeKind::BASELINE;
+ bool is_baseline = abstract_code->kind(cage_base) == CodeKind::BASELINE;
Handle<ByteArray> source_position_table(
- abstract_code->SourcePositionTable(*shared), isolate_);
- std::unique_ptr<baseline::BytecodeOffsetIterator> baseline_iterator =
- nullptr;
+ abstract_code->SourcePositionTable(cage_base, *shared), isolate_);
+ std::unique_ptr<baseline::BytecodeOffsetIterator> baseline_iterator;
if (is_baseline) {
Handle<BytecodeArray> bytecodes(shared->GetBytecodeArray(isolate_),
isolate_);
Handle<ByteArray> bytecode_offsets(
- abstract_code->GetCode().bytecode_offset_table(), isolate_);
+ abstract_code->ToCode(cage_base).bytecode_offset_table(cage_base),
+ isolate_);
baseline_iterator = std::make_unique<baseline::BytecodeOffsetIterator>(
bytecode_offsets, bytecodes);
}
@@ -160,7 +163,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
line_table->SetPosition(code_offset, line_number, inlining_id);
} else {
DCHECK(!is_baseline);
- DCHECK(abstract_code->IsCode());
+ DCHECK(abstract_code->IsCode(cage_base));
Handle<Code> code = handle(abstract_code->GetCode(), isolate_);
std::vector<SourcePositionInfo> stack =
it.source_position().InliningStack(code);
@@ -224,14 +227,13 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
}
rec->entry->FillFunctionInfo(*shared);
- rec->instruction_size = abstract_code->InstructionSize();
+ rec->instruction_size = abstract_code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, abstract_code);
DispatchCodeEvent(evt_rec);
}
#if V8_ENABLE_WEBASSEMBLY
-void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
- const wasm::WasmCode* code,
+void ProfilerListener::CodeCreateEvent(CodeTag tag, const wasm::WasmCode* code,
wasm::WasmName name,
const char* source_url, int code_offset,
int script_id) {
@@ -252,8 +254,8 @@ void ProfilerListener::CallbackEvent(Handle<Name> name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
- rec->entry =
- code_entries_.Create(LogEventListener::CALLBACK_TAG, GetName(*name));
+ rec->entry = code_entries_.Create(LogEventListener::CodeTag::kCallback,
+ GetName(*name));
rec->instruction_size = 1;
DispatchCodeEvent(evt_rec);
}
@@ -263,7 +265,7 @@ void ProfilerListener::GetterCallbackEvent(Handle<Name> name,
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
- rec->entry = code_entries_.Create(LogEventListener::CALLBACK_TAG,
+ rec->entry = code_entries_.Create(LogEventListener::CodeTag::kCallback,
GetConsName("get ", *name));
rec->instruction_size = 1;
DispatchCodeEvent(evt_rec);
@@ -274,7 +276,7 @@ void ProfilerListener::SetterCallbackEvent(Handle<Name> name,
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = entry_point;
- rec->entry = code_entries_.Create(LogEventListener::CALLBACK_TAG,
+ rec->entry = code_entries_.Create(LogEventListener::CodeTag::kCallback,
GetConsName("set ", *name));
rec->instruction_size = 1;
DispatchCodeEvent(evt_rec);
@@ -284,12 +286,13 @@ void ProfilerListener::RegExpCodeCreateEvent(Handle<AbstractCode> code,
Handle<String> source) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeCreation);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = code->InstructionStart(cage_base);
rec->entry = code_entries_.Create(
- LogEventListener::REG_EXP_TAG, GetConsName("RegExp: ", *source),
+ LogEventListener::CodeTag::kRegExp, GetConsName("RegExp: ", *source),
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
- rec->instruction_size = code->InstructionSize();
+ rec->instruction_size = code->InstructionSize(cage_base);
weak_code_registry_.Track(rec->entry, code);
DispatchCodeEvent(evt_rec);
}
@@ -298,8 +301,9 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
DisallowGarbageCollection no_gc;
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeMove);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from_instruction_start = from.InstructionStart();
- rec->to_instruction_start = to.InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->from_instruction_start = from.InstructionStart(cage_base);
+ rec->to_instruction_start = to.InstructionStart(cage_base);
DispatchCodeEvent(evt_rec);
}
@@ -314,7 +318,8 @@ void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) {
CodeEventsContainer evt_rec(CodeEventRecord::Type::kCodeDisableOpt);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ PtrComprCageBase cage_base(isolate_);
+ rec->instruction_start = code->InstructionStart(cage_base);
rec->bailout_reason =
GetBailoutReason(shared->disabled_optimization_reason());
DispatchCodeEvent(evt_rec);
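One small cleanup above drops the redundant "= nullptr" initializer on the baseline_iterator declaration: a default-constructed std::unique_ptr is already null, and the iterator is only populated on the baseline path. A tiny sketch of that conditional-construction pattern; the iterator type here is a placeholder, not V8's baseline::BytecodeOffsetIterator.

#include <memory>

struct OffsetIterator {  // placeholder for a helper needed on only one path
  explicit OffsetIterator(int start) : position(start) {}
  int position;
};

int Process(bool is_baseline) {
  // Default construction already yields an empty (null) unique_ptr;
  // "= nullptr" adds nothing, which is what the patch removes.
  std::unique_ptr<OffsetIterator> iterator;
  if (is_baseline) {
    iterator = std::make_unique<OffsetIterator>(0);
  }
  // Later code guards on the pointer instead of a separate flag.
  return iterator ? iterator->position : -1;
}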
diff --git a/chromium/v8/src/profiler/profiler-listener.h b/chromium/v8/src/profiler/profiler-listener.h
index cda766375a7..f409d4cfeaa 100644
--- a/chromium/v8/src/profiler/profiler-listener.h
+++ b/chromium/v8/src/profiler/profiler-listener.h
@@ -35,18 +35,18 @@ class V8_EXPORT_PRIVATE ProfilerListener : public LogEventListener,
ProfilerListener(const ProfilerListener&) = delete;
ProfilerListener& operator=(const ProfilerListener&) = delete;
- void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
+ void CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
const char* name) override;
- void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
+ void CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<Name> name) override;
- void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
+ void CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name) override;
- void CodeCreateEvent(LogEventsAndTags tag, Handle<AbstractCode> code,
+ void CodeCreateEvent(CodeTag tag, Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared,
Handle<Name> script_name, int line, int column) override;
#if V8_ENABLE_WEBASSEMBLY
- void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
+ void CodeCreateEvent(CodeTag tag, const wasm::WasmCode* code,
wasm::WasmName name, const char* source_url,
int code_offset, int script_id) override;
#endif // V8_ENABLE_WEBASSEMBLY
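Every CodeCreateEvent override here has to change its first parameter from LogEventsAndTags to CodeTag in the same patch as the base class; otherwise the declarations no longer override any virtual and the override specifier turns into a compile error. A minimal illustration with stand-in interfaces, not V8's actual LogEventListener:

enum class CodeTag { kFunction, kBuiltin };  // illustrative

struct LogEventListenerBase {
  virtual ~LogEventListenerBase() = default;
  // Base-class signature after the rename.
  virtual void CodeCreateEvent(CodeTag tag, const char* name) = 0;
};

struct ProfilerListenerSketch : LogEventListenerBase {
  // Must mirror the new parameter type; with the old enum type this would no
  // longer override anything, and `override` would fail to compile.
  void CodeCreateEvent(CodeTag tag, const char* name) override {
    (void)tag;
    (void)name;
  }
};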
diff --git a/chromium/v8/src/profiler/tick-sample.cc b/chromium/v8/src/profiler/tick-sample.cc
index cd65f9d6517..766b4188357 100644
--- a/chromium/v8/src/profiler/tick-sample.cc
+++ b/chromium/v8/src/profiler/tick-sample.cc
@@ -133,6 +133,13 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::ra));
+#elif V8_TARGET_ARCH_RISCV32
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<void*>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::ra));
#endif
if (state->sp == 0 || state->fp == 0) {
// It possible that the simulator is interrupted while it is updating