author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 15:28:34 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2018-08-28 13:54:51 +0000
commit    2a19c63448c84c1805fb1a585c3651318bb86ca7
tree      eb17888e8531aa6ee5e85721bd553b832a7e5156 /chromium/base/profiler
parent    b014812705fc80bff0a5c120dfcef88f349816dc
download  qtwebengine-chromium-2a19c63448c84c1805fb1a585c3651318bb86ca7.tar.gz

BASELINE: Update Chromium to 69.0.3497.70

Change-Id: I2b7b56e4e7a8b26656930def0d4575dc32b900a0
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/base/profiler')
-rw-r--r--  chromium/base/profiler/native_stack_sampler.h               |  35
-rw-r--r--  chromium/base/profiler/native_stack_sampler_mac.cc          | 431
-rw-r--r--  chromium/base/profiler/native_stack_sampler_posix.cc        |   1
-rw-r--r--  chromium/base/profiler/native_stack_sampler_win.cc          | 224
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler.cc           | 362
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler.h            | 205
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler_unittest.cc  | 698
-rw-r--r--  chromium/base/profiler/win32_stack_frame_unwinder.cc        |   8
8 files changed, 922 insertions(+), 1042 deletions(-)
diff --git a/chromium/base/profiler/native_stack_sampler.h b/chromium/base/profiler/native_stack_sampler.h
index ebd7c3c4b81..5d7e9b07715 100644
--- a/chromium/base/profiler/native_stack_sampler.h
+++ b/chromium/base/profiler/native_stack_sampler.h
@@ -17,8 +17,8 @@ namespace base {
class NativeStackSamplerTestDelegate;
// NativeStackSampler is an implementation detail of StackSamplingProfiler. It
-// abstracts the native implementation required to record a stack sample for a
-// given thread.
+// abstracts the native implementation required to record a set of stack frames
+// for a given thread.
class NativeStackSampler {
public:
// This class contains a buffer for stack copies that can be shared across
@@ -41,22 +41,12 @@ class NativeStackSampler {
DISALLOW_COPY_AND_ASSIGN(StackBuffer);
};
- // The callback type used to add annotations to a sample during collection.
- // This is passed to the native sampler to be applied at the most appropriate
- // time. It is a simple function-pointer because the generated code must be
- // completely predictable and do nothing that could acquire a mutex; a
- // Callback object is code outside the control of this object and could,
- // for example, acquire a mutex as part of allocating memory for a LOG
- // message.
- using AnnotateCallback = void (*)(StackSamplingProfiler::Sample*);
-
virtual ~NativeStackSampler();
- // Creates a stack sampler that records samples for |thread_handle|. Returns
- // null if this platform does not support stack sampling.
+ // Creates a stack sampler that records samples for thread with |thread_id|.
+ // Returns null if this platform does not support stack sampling.
static std::unique_ptr<NativeStackSampler> Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
// Gets the required size of the stack buffer.
@@ -69,18 +59,13 @@ class NativeStackSampler {
// The following functions are all called on the SamplingThread (not the
// thread being sampled).
- // Notifies the sampler that we're starting to record a new profile. Modules
- // shared across samples in the profile should be recorded in |modules|.
- virtual void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) = 0;
-
- // Records a stack sample to |sample|.
- virtual void RecordStackSample(StackBuffer* stackbuffer,
- StackSamplingProfiler::Sample* sample) = 0;
+ // Notifies the sampler that we're starting to record a new profile.
+ virtual void ProfileRecordingStarting() = 0;
- // Notifies the sampler that we've stopped recording the current
- // profile.
- virtual void ProfileRecordingStopped(StackBuffer* stackbuffer) = 0;
+ // Records a set of internal frames and returns them.
+ virtual std::vector<StackSamplingProfiler::InternalFrame> RecordStackFrames(
+ StackBuffer* stackbuffer,
+ StackSamplingProfiler::ProfileBuilder* profile_builder) = 0;
protected:
NativeStackSampler();
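
A minimal sketch (not part of the patch) of how the reworked interface is driven for one sample, based on the SamplingThread changes later in this diff; the sampler, profile builder, and stack buffer stand in for members of a CollectionContext:

#include <utility>
#include <vector>

#include "base/profiler/native_stack_sampler.h"
#include "base/profiler/stack_sampling_profiler.h"

namespace base {

void TakeOneSample(NativeStackSampler* sampler,
                   StackSamplingProfiler::ProfileBuilder* profile_builder,
                   NativeStackSampler::StackBuffer* stack_buffer) {
  // The sampler suspends the target thread, copies its stack, walks the copy,
  // and returns the recorded frames; the profile builder turns each batch of
  // frames into profile data.
  std::vector<StackSamplingProfiler::InternalFrame> internal_frames =
      sampler->RecordStackFrames(stack_buffer, profile_builder);
  profile_builder->OnSampleCompleted(std::move(internal_frames));
}

}  // namespace base
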
diff --git a/chromium/base/profiler/native_stack_sampler_mac.cc b/chromium/base/profiler/native_stack_sampler_mac.cc
index a161173f060..d45c7a8c836 100644
--- a/chromium/base/profiler/native_stack_sampler_mac.cc
+++ b/chromium/base/profiler/native_stack_sampler_mac.cc
@@ -19,7 +19,6 @@
#include <sys/syslimits.h>
#include <algorithm>
-#include <map>
#include <memory>
#include "base/logging.h"
@@ -34,19 +33,32 @@ void _sigtramp(int, int, struct sigset*);
namespace base {
+using Frame = StackSamplingProfiler::Frame;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
+using ProfileBuilder = StackSamplingProfiler::ProfileBuilder;
+
namespace {
-// Maps a module's address range (half-open) in memory to an index in a separate
-// data structure.
-struct ModuleIndex {
- ModuleIndex(uintptr_t start, uintptr_t end, size_t idx)
- : base_address(start), end_address(end), index(idx){};
+// ModuleCacheEntry records a module's address range (half-open) in memory and
+// the module itself.
+struct ModuleCacheEntry {
+ ModuleCacheEntry(uintptr_t start,
+ uintptr_t end,
+ InternalModule internal_module)
+ : base_address(start),
+ end_address(end),
+ internal_module(std::move(internal_module)){};
+
// Base address of the represented module.
uintptr_t base_address;
+
// First address off the end of the represented module.
uintptr_t end_address;
- // An index to the represented module in a separate container.
- size_t index;
+
+ // Module information.
+ InternalModule internal_module;
};
// Module identifiers ---------------------------------------------------------
@@ -66,9 +78,11 @@ std::string GetUniqueId(const void* module_addr) {
size_t offset = sizeof(mach_header_64);
size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds;
- for (uint32_t i = 0; (i < mach_header->ncmds) &&
- (offset + sizeof(load_command) < offset_limit);
- ++i) {
+
+ for (uint32_t i = 0; i < mach_header->ncmds; ++i) {
+ if (offset + sizeof(load_command) >= offset_limit)
+ return std::string();
+
const load_command* current_cmd = reinterpret_cast<const load_command*>(
reinterpret_cast<const uint8_t*>(mach_header) + offset);
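
For context, a hedged sketch of the load-command walk this loop guards; the LC_UUID handling is an assumption (the rest of GetUniqueId is elided from the hunk), but the bounds check mirrors the new code above:

#include <mach-o/loader.h>

#include <cstddef>
#include <cstdint>
#include <string>

std::string FindUuidLoadCommand(const mach_header_64* mach_header) {
  size_t offset = sizeof(mach_header_64);
  const size_t offset_limit = sizeof(mach_header_64) + mach_header->sizeofcmds;
  for (uint32_t i = 0; i < mach_header->ncmds; ++i) {
    // Bail out rather than read past the declared command area if the header
    // is malformed or truncated.
    if (offset + sizeof(load_command) >= offset_limit)
      return std::string();
    const auto* current_cmd = reinterpret_cast<const load_command*>(
        reinterpret_cast<const uint8_t*>(mach_header) + offset);
    if (current_cmd->cmd == LC_UUID) {
      const auto* uuid_cmd = reinterpret_cast<const uuid_command*>(current_cmd);
      // The real code hex-encodes the 16-byte UUID; it is returned raw here.
      return std::string(reinterpret_cast<const char*>(uuid_cmd->uuid),
                         sizeof(uuid_cmd->uuid));
    }
    offset += current_cmd->cmdsize;
  }
  return std::string();
}
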
@@ -108,41 +122,6 @@ size_t GetModuleTextSize(const void* module_addr) {
return module_size;
}
-// Gets the index for the Module containing |instruction_pointer| in
-// |modules|, adding it if it's not already present. Returns
-// StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
-// determined for |module|.
-size_t GetModuleIndex(const uintptr_t instruction_pointer,
- std::vector<StackSamplingProfiler::Module>* modules,
- std::vector<ModuleIndex>* profile_module_index) {
- // Check if |instruction_pointer| is in the address range of a module we've
- // already seen.
- auto module_index =
- std::find_if(profile_module_index->begin(), profile_module_index->end(),
- [instruction_pointer](const ModuleIndex& index) {
- return instruction_pointer >= index.base_address &&
- instruction_pointer < index.end_address;
- });
- if (module_index != profile_module_index->end()) {
- return module_index->index;
- }
- Dl_info inf;
- if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf))
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
-
- StackSamplingProfiler::Module module(
- reinterpret_cast<uintptr_t>(inf.dli_fbase), GetUniqueId(inf.dli_fbase),
- base::FilePath(inf.dli_fname));
- modules->push_back(module);
-
- uintptr_t base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
- size_t index = modules->size() - 1;
- profile_module_index->emplace_back(
- base_module_address,
- base_module_address + GetModuleTextSize(inf.dli_fbase), index);
- return index;
-}
-
// Stack walking --------------------------------------------------------------
// Fills |state| with |target_thread|'s context.
@@ -151,8 +130,7 @@ size_t GetModuleIndex(const uintptr_t instruction_pointer,
// that no shared resources (e.g. memory allocators) are used for the duration
// of this function.
bool GetThreadState(thread_act_t target_thread, x86_thread_state64_t* state) {
- mach_msg_type_number_t count =
- static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT);
+ auto count = static_cast<mach_msg_type_number_t>(x86_THREAD_STATE64_COUNT);
return thread_get_state(target_thread, x86_THREAD_STATE64,
reinterpret_cast<thread_state_t>(state),
&count) == KERN_SUCCESS;
@@ -169,17 +147,13 @@ uintptr_t RewritePointerIfInOriginalStack(
const uintptr_t* original_stack_top,
uintptr_t* stack_copy_bottom,
uintptr_t pointer) {
- uintptr_t original_stack_bottom_int =
+ auto original_stack_bottom_int =
reinterpret_cast<uintptr_t>(original_stack_bottom);
- uintptr_t original_stack_top_int =
- reinterpret_cast<uintptr_t>(original_stack_top);
- uintptr_t stack_copy_bottom_int =
- reinterpret_cast<uintptr_t>(stack_copy_bottom);
+ auto original_stack_top_int = reinterpret_cast<uintptr_t>(original_stack_top);
+ auto stack_copy_bottom_int = reinterpret_cast<uintptr_t>(stack_copy_bottom);
- if ((pointer < original_stack_bottom_int) ||
- (pointer >= original_stack_top_int)) {
+ if (pointer < original_stack_bottom_int || pointer >= original_stack_top_int)
return pointer;
- }
return stack_copy_bottom_int + (pointer - original_stack_bottom_int);
}
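
A standalone worked example of the rewrite arithmetic above (illustration only, with made-up addresses):

#include <cstdint>

// If the original stack occupies [0x7000, 0x8000) and its copy starts at
// 0x1000, a saved frame pointer 0x7f80 lies inside the original stack and is
// rewritten to the matching slot in the copy; any other pointer (for example
// a heap address) is returned unchanged.
uintptr_t RewriteExample() {
  const uintptr_t original_bottom = 0x7000;
  const uintptr_t original_top = 0x8000;
  const uintptr_t copy_bottom = 0x1000;
  const uintptr_t saved_rbp = 0x7f80;
  if (saved_rbp < original_bottom || saved_rbp >= original_top)
    return saved_rbp;                                  // Outside: unchanged.
  return copy_bottom + (saved_rbp - original_bottom);  // 0x1f80.
}
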
@@ -291,65 +265,12 @@ bool HasValidRbp(unw_cursor_t* unwind_cursor, uintptr_t stack_top) {
unw_get_reg(unwind_cursor, UNW_X86_64_RSP, &rsp);
unw_get_reg(unwind_cursor, UNW_X86_64_RBP, &rbp);
uint32_t offset = GetFrameOffset(proc_info.format) * sizeof(unw_word_t);
- if (rbp < offset || (rbp - offset) < rsp || rbp > stack_top) {
+ if (rbp < offset || (rbp - offset) < rsp || rbp > stack_top)
return false;
- }
}
return true;
}
-// Walks the stack represented by |unwind_context|, calling back to the provided
-// lambda for each frame. Returns false if an error occurred, otherwise returns
-// true.
-template <typename StackFrameCallback, typename ContinueUnwindPredicate>
-bool WalkStackFromContext(
- unw_context_t* unwind_context,
- size_t* frame_count,
- std::vector<StackSamplingProfiler::Module>* current_modules,
- std::vector<ModuleIndex>* profile_module_index,
- const StackFrameCallback& callback,
- const ContinueUnwindPredicate& continue_unwind) {
- unw_cursor_t unwind_cursor;
- unw_init_local(&unwind_cursor, unwind_context);
-
- int step_result;
- unw_word_t rip;
- do {
- ++(*frame_count);
- unw_get_reg(&unwind_cursor, UNW_REG_IP, &rip);
-
- // Ensure IP is in a module.
- //
- // Frameless unwinding (non-DWARF) works by fetching the function's
- // stack size from the unwind encoding or stack, and adding it to the
- // stack pointer to determine the function's return address.
- //
- // If we're in a function prologue or epilogue, the actual stack size
- // may be smaller than it will be during the normal course of execution.
- // When libunwind adds the expected stack size, it will look for the
- // return address in the wrong place. This check should ensure that we
- // bail before trying to deref a bad IP obtained this way in the previous
- // frame.
- size_t module_index =
- GetModuleIndex(rip, current_modules, profile_module_index);
- if (module_index == StackSamplingProfiler::Frame::kUnknownModuleIndex) {
- return false;
- }
-
- callback(static_cast<uintptr_t>(rip), module_index);
-
- if (!continue_unwind(&unwind_cursor))
- return false;
-
- step_result = unw_step(&unwind_cursor);
- } while (step_result > 0);
-
- if (step_result != 0)
- return false;
-
- return true;
-}
-
const char* LibSystemKernelName() {
static char path[PATH_MAX];
static char* name = nullptr;
@@ -369,7 +290,7 @@ const char* LibSystemKernelName() {
}
void GetSigtrampRange(uintptr_t* start, uintptr_t* end) {
- uintptr_t address = reinterpret_cast<uintptr_t>(&_sigtramp);
+ auto address = reinterpret_cast<uintptr_t>(&_sigtramp);
DCHECK(address != 0);
*start = address;
@@ -389,57 +310,6 @@ void GetSigtrampRange(uintptr_t* start, uintptr_t* end) {
*end = info.end_ip;
}
-// Walks the stack represented by |thread_state|, calling back to the provided
-// lambda for each frame.
-template <typename StackFrameCallback, typename ContinueUnwindPredicate>
-void WalkStack(const x86_thread_state64_t& thread_state,
- std::vector<StackSamplingProfiler::Module>* current_modules,
- std::vector<ModuleIndex>* profile_module_index,
- const StackFrameCallback& callback,
- const ContinueUnwindPredicate& continue_unwind) {
- size_t frame_count = 0;
- // This uses libunwind to walk the stack. libunwind is designed to be used for
- // a thread to walk its own stack. This creates two problems.
-
- // Problem 1: There is no official way to create a unw_context other than to
- // create it from the current state of the current thread's stack. To get
- // around this, forge a context. A unw_context is just a copy of the 16 main
- // registers followed by the instruction pointer, nothing more.
- // Coincidentally, the first 17 items of the x86_thread_state64_t type are
- // exactly those registers in exactly the same order, so just bulk copy them
- // over.
- unw_context_t unwind_context;
- memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17);
- bool result =
- WalkStackFromContext(&unwind_context, &frame_count, current_modules,
- profile_module_index, callback, continue_unwind);
-
- if (!result)
- return;
-
- if (frame_count == 1) {
- // Problem 2: Because libunwind is designed to be triggered by user code on
- // their own thread, if it hits a library that has no unwind info for the
- // function that is being executed, it just stops. This isn't a problem in
- // the normal case, but in this case, it's quite possible that the stack
- // being walked is stopped in a function that bridges to the kernel and thus
- // is missing the unwind info.
-
- // For now, just unwind the single case where the thread is stopped in a
- // function in libsystem_kernel.
- uint64_t& rsp = unwind_context.data[7];
- uint64_t& rip = unwind_context.data[16];
- Dl_info info;
- if (dladdr(reinterpret_cast<void*>(rip), &info) != 0 &&
- strcmp(info.dli_fname, LibSystemKernelName()) == 0) {
- rip = *reinterpret_cast<uint64_t*>(rsp);
- rsp += 8;
- WalkStackFromContext(&unwind_context, &frame_count, current_modules,
- profile_module_index, callback, continue_unwind);
- }
- }
-}
-
// ScopedSuspendThread --------------------------------------------------------
// Suspends a thread for the lifetime of the object.
@@ -471,40 +341,46 @@ class ScopedSuspendThread {
class NativeStackSamplerMac : public NativeStackSampler {
public:
NativeStackSamplerMac(mach_port_t thread_port,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
~NativeStackSamplerMac() override;
// StackSamplingProfiler::NativeStackSampler:
- void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) override;
- void RecordStackSample(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) override;
- void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+ void ProfileRecordingStarting() override;
+ std::vector<InternalFrame> RecordStackFrames(
+ StackBuffer* stack_buffer,
+ ProfileBuilder* profile_builder) override;
private:
- // Suspends the thread with |thread_port_|, copies its stack and resumes the
- // thread, then records the stack frames and associated modules into |sample|.
- void SuspendThreadAndRecordStack(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample);
+ // Returns the InternalModule containing |instruction_pointer|, adding it to
+ // module_cache_entry_ if it's not already present.
+ InternalModule GetInternalModule(uintptr_t instruction_pointer);
+
+ // Walks the stack represented by |unwind_context|, calling back to the
+ // provided lambda for each frame. Returns false if an error occurred,
+ // otherwise returns true.
+ template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+ bool WalkStackFromContext(unw_context_t* unwind_context,
+ size_t* frame_count,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind);
+
+ // Walks the stack represented by |thread_state|, calling back to the
+ // provided lambda for each frame.
+ template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+ void WalkStack(const x86_thread_state64_t& thread_state,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind);
// Weak reference: Mach port for thread being profiled.
mach_port_t thread_port_;
- const AnnotateCallback annotator_;
-
NativeStackSamplerTestDelegate* const test_delegate_;
// The stack base address corresponding to |thread_handle_|.
const void* const thread_stack_base_address_;
- // Weak. Points to the modules associated with the profile being recorded
- // between ProfileRecordingStarting() and ProfileRecordingStopped().
- std::vector<StackSamplingProfiler::Module>* current_modules_ = nullptr;
-
- // Maps a module's address range to the corresponding Module's index within
- // current_modules_.
- std::vector<ModuleIndex> profile_module_index_;
+ // Maps a module's address range to the module.
+ std::vector<ModuleCacheEntry> module_cache_entry_;
// The address range of |_sigtramp|, the signal trampoline function.
uintptr_t sigtramp_start_;
@@ -515,15 +391,11 @@ class NativeStackSamplerMac : public NativeStackSampler {
NativeStackSamplerMac::NativeStackSamplerMac(
mach_port_t thread_port,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate)
: thread_port_(thread_port),
- annotator_(annotator),
test_delegate_(test_delegate),
thread_stack_base_address_(
pthread_get_stackaddr_np(pthread_from_mach_thread_np(thread_port))) {
- DCHECK(annotator_);
-
GetSigtrampRange(&sigtramp_start_, &sigtramp_end_);
// This class suspends threads, and those threads might be suspended in dyld.
// Therefore, for all the system functions that might be linked in dynamically
@@ -535,29 +407,17 @@ NativeStackSamplerMac::NativeStackSamplerMac(
NativeStackSamplerMac::~NativeStackSamplerMac() {}
-void NativeStackSamplerMac::ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) {
- current_modules_ = modules;
- profile_module_index_.clear();
-}
-
-void NativeStackSamplerMac::RecordStackSample(
- StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
- DCHECK(current_modules_);
-
- SuspendThreadAndRecordStack(stack_buffer, sample);
+void NativeStackSamplerMac::ProfileRecordingStarting() {
+ module_cache_entry_.clear();
}
-void NativeStackSamplerMac::ProfileRecordingStopped(StackBuffer* stack_buffer) {
- current_modules_ = nullptr;
-}
-
-void NativeStackSamplerMac::SuspendThreadAndRecordStack(
+std::vector<InternalFrame> NativeStackSamplerMac::RecordStackFrames(
StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
+ ProfileBuilder* profile_builder) {
x86_thread_state64_t thread_state;
+ const std::vector<InternalFrame> empty_internal_frames;
+
// Copy the stack.
uintptr_t new_stack_top = 0;
@@ -568,21 +428,21 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
// default heap acquired by the target thread before it was suspended.
ScopedSuspendThread suspend_thread(thread_port_);
if (!suspend_thread.was_successful())
- return;
+ return empty_internal_frames;
if (!GetThreadState(thread_port_, &thread_state))
- return;
- uintptr_t stack_top =
- reinterpret_cast<uintptr_t>(thread_stack_base_address_);
+ return empty_internal_frames;
+
+ auto stack_top = reinterpret_cast<uintptr_t>(thread_stack_base_address_);
uintptr_t stack_bottom = thread_state.__rsp;
if (stack_bottom >= stack_top)
- return;
- uintptr_t stack_size = stack_top - stack_bottom;
+ return empty_internal_frames;
+ uintptr_t stack_size = stack_top - stack_bottom;
if (stack_size > stack_buffer->size())
- return;
+ return empty_internal_frames;
- (*annotator_)(sample);
+ profile_builder->RecordAnnotations();
CopyStackAndRewritePointers(
reinterpret_cast<uintptr_t*>(stack_buffer->buffer()),
@@ -600,19 +460,16 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
// Reserve enough memory for most stacks, to avoid repeated allocations.
// Approximately 99.9% of recorded stacks are 128 frames or fewer.
- sample->frames.reserve(128);
-
- auto* current_modules = current_modules_;
- auto* profile_module_index = &profile_module_index_;
+ std::vector<InternalFrame> internal_frames;
+ internal_frames.reserve(128);
// Avoid an out-of-bounds read bug in libunwind that can crash us in some
// circumstances. If we're subject to that case, just record the first frame
// and bail. See MayTriggerUnwInitLocalCrash for details.
uintptr_t rip = thread_state.__rip;
if (MayTriggerUnwInitLocalCrash(rip)) {
- sample->frames.emplace_back(
- rip, GetModuleIndex(rip, current_modules, profile_module_index));
- return;
+ internal_frames.emplace_back(rip, GetInternalModule(rip));
+ return internal_frames;
}
const auto continue_predicate = [this,
@@ -631,22 +488,142 @@ void NativeStackSamplerMac::SuspendThreadAndRecordStack(
return HasValidRbp(unwind_cursor, new_stack_top);
};
- WalkStack(thread_state, current_modules, profile_module_index,
- [sample, current_modules, profile_module_index](
- uintptr_t frame_ip, size_t module_index) {
- sample->frames.emplace_back(frame_ip, module_index);
- },
- continue_predicate);
+ WalkStack(
+ thread_state,
+ [&internal_frames](uintptr_t frame_ip, InternalModule internal_module) {
+ internal_frames.emplace_back(frame_ip, std::move(internal_module));
+ },
+ continue_predicate);
+
+ return internal_frames;
+}
+
+InternalModule NativeStackSamplerMac::GetInternalModule(
+ uintptr_t instruction_pointer) {
+ // Check if |instruction_pointer| is in the address range of a module we've
+ // already seen.
+ auto loc =
+ std::find_if(module_cache_entry_.begin(), module_cache_entry_.end(),
+ [instruction_pointer](const ModuleCacheEntry& entry) {
+ return instruction_pointer >= entry.base_address &&
+ instruction_pointer < entry.end_address;
+ });
+ if (loc != module_cache_entry_.end())
+ return loc->internal_module;
+
+ Dl_info inf;
+ if (!dladdr(reinterpret_cast<const void*>(instruction_pointer), &inf))
+ return InternalModule();
+
+ auto base_module_address = reinterpret_cast<uintptr_t>(inf.dli_fbase);
+
+ InternalModule internal_module(
+ base_module_address, GetUniqueId(inf.dli_fbase), FilePath(inf.dli_fname));
+
+ module_cache_entry_.emplace_back(
+ base_module_address,
+ base_module_address + GetModuleTextSize(inf.dli_fbase), internal_module);
+
+ return internal_module;
+}
+
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+bool NativeStackSamplerMac::WalkStackFromContext(
+ unw_context_t* unwind_context,
+ size_t* frame_count,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind) {
+ unw_cursor_t unwind_cursor;
+ unw_init_local(&unwind_cursor, unwind_context);
+
+ int step_result;
+ unw_word_t rip;
+ do {
+ ++(*frame_count);
+ unw_get_reg(&unwind_cursor, UNW_REG_IP, &rip);
+
+ // Ensure IP is in a module.
+ //
+ // Frameless unwinding (non-DWARF) works by fetching the function's stack
+ // size from the unwind encoding or stack, and adding it to the stack
+ // pointer to determine the function's return address.
+ //
+ // If we're in a function prologue or epilogue, the actual stack size may be
+ // smaller than it will be during the normal course of execution. When
+ // libunwind adds the expected stack size, it will look for the return
+ // address in the wrong place. This check should ensure that we bail before
+ // trying to deref a bad IP obtained this way in the previous frame.
+ InternalModule internal_module = GetInternalModule(rip);
+ if (!internal_module.is_valid)
+ return false;
+
+ callback(static_cast<uintptr_t>(rip), internal_module);
+
+ if (!continue_unwind(&unwind_cursor))
+ return false;
+
+ step_result = unw_step(&unwind_cursor);
+ } while (step_result > 0);
+
+ if (step_result != 0)
+ return false;
+
+ return true;
+}
+
+template <typename StackFrameCallback, typename ContinueUnwindPredicate>
+void NativeStackSamplerMac::WalkStack(
+ const x86_thread_state64_t& thread_state,
+ const StackFrameCallback& callback,
+ const ContinueUnwindPredicate& continue_unwind) {
+ size_t frame_count = 0;
+ // This uses libunwind to walk the stack. libunwind is designed to be used for
+ // a thread to walk its own stack. This creates two problems.
+
+ // Problem 1: There is no official way to create a unw_context other than to
+ // create it from the current state of the current thread's stack. To get
+ // around this, forge a context. A unw_context is just a copy of the 16 main
+ // registers followed by the instruction pointer, nothing more.
+ // Coincidentally, the first 17 items of the x86_thread_state64_t type are
+ // exactly those registers in exactly the same order, so just bulk copy them
+ // over.
+ unw_context_t unwind_context;
+ memcpy(&unwind_context, &thread_state, sizeof(uintptr_t) * 17);
+ bool result = WalkStackFromContext(&unwind_context, &frame_count, callback,
+ continue_unwind);
+
+ if (!result)
+ return;
+
+ if (frame_count == 1) {
+ // Problem 2: Because libunwind is designed to be triggered by user code on
+ // their own thread, if it hits a library that has no unwind info for the
+ // function that is being executed, it just stops. This isn't a problem in
+ // the normal case, but in this case, it's quite possible that the stack
+ // being walked is stopped in a function that bridges to the kernel and thus
+ // is missing the unwind info.
+
+ // For now, just unwind the single case where the thread is stopped in a
+ // function in libsystem_kernel.
+ uint64_t& rsp = unwind_context.data[7];
+ uint64_t& rip = unwind_context.data[16];
+ Dl_info info;
+ if (dladdr(reinterpret_cast<void*>(rip), &info) != 0 &&
+ strcmp(info.dli_fname, LibSystemKernelName()) == 0) {
+ rip = *reinterpret_cast<uint64_t*>(rsp);
+ rsp += 8;
+ WalkStackFromContext(&unwind_context, &frame_count, callback,
+ continue_unwind);
+ }
+ }
}
} // namespace
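
A small sketch of the layout precondition behind the memcpy in WalkStack above; the register-ordering claim is taken from the patch's own comment, not something these checks can prove:

#include <libunwind.h>
#include <mach/thread_status.h>

#include <cstdint>

// Assumption from the comment in WalkStack: the first 17 64-bit slots of
// x86_thread_state64_t are the 16 general-purpose registers followed by RIP,
// in the order libunwind stores them, so unwind_context.data[7] is RSP and
// data[16] is RIP after the bulk copy. These asserts only document the size
// preconditions of that copy.
static_assert(sizeof(x86_thread_state64_t) >= 17 * sizeof(uint64_t),
              "thread state must contain the 17 copied registers");
static_assert(sizeof(unw_context_t) >= 17 * sizeof(uint64_t),
              "unw_context_t must have room for the 17 copied registers");
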
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
- return std::make_unique<NativeStackSamplerMac>(thread_id, annotator,
- test_delegate);
+ return std::make_unique<NativeStackSamplerMac>(thread_id, test_delegate);
}
size_t NativeStackSampler::GetStackBufferSize() {
diff --git a/chromium/base/profiler/native_stack_sampler_posix.cc b/chromium/base/profiler/native_stack_sampler_posix.cc
index 1055d44e930..fdc18e017ff 100644
--- a/chromium/base/profiler/native_stack_sampler_posix.cc
+++ b/chromium/base/profiler/native_stack_sampler_posix.cc
@@ -8,7 +8,6 @@ namespace base {
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
return std::unique_ptr<NativeStackSampler>();
}
diff --git a/chromium/base/profiler/native_stack_sampler_win.cc b/chromium/base/profiler/native_stack_sampler_win.cc
index b53197d0dc7..b9b1773e896 100644
--- a/chromium/base/profiler/native_stack_sampler_win.cc
+++ b/chromium/base/profiler/native_stack_sampler_win.cc
@@ -4,8 +4,9 @@
#include "base/profiler/native_stack_sampler.h"
-#include <objbase.h>
#include <windows.h>
+
+#include <objbase.h>
#include <stddef.h>
#include <winternl.h>
@@ -20,6 +21,8 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/profiler/win32_stack_frame_unwinder.h"
+#include "base/stl_util.h"
+#include "base/strings/string16.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -29,6 +32,12 @@
namespace base {
+using Frame = StackSamplingProfiler::Frame;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
+using ProfileBuilder = StackSamplingProfiler::ProfileBuilder;
+
// Stack recording functions --------------------------------------------------
namespace {
@@ -59,21 +68,18 @@ const TEB* GetThreadEnvironmentBlock(HANDLE thread_handle) {
};
using NtQueryInformationThreadFunction =
- NTSTATUS (WINAPI*)(HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG,
- PULONG);
+ NTSTATUS(WINAPI*)(HANDLE, THREAD_INFORMATION_CLASS, PVOID, ULONG, PULONG);
- const NtQueryInformationThreadFunction nt_query_information_thread =
- reinterpret_cast<NtQueryInformationThreadFunction>(
- ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"),
- "NtQueryInformationThread"));
+ const auto nt_query_information_thread =
+ reinterpret_cast<NtQueryInformationThreadFunction>(::GetProcAddress(
+ ::GetModuleHandle(L"ntdll.dll"), "NtQueryInformationThread"));
if (!nt_query_information_thread)
return nullptr;
THREAD_BASIC_INFORMATION basic_info = {0};
- NTSTATUS status =
- nt_query_information_thread(thread_handle, ThreadBasicInformation,
- &basic_info, sizeof(THREAD_BASIC_INFORMATION),
- nullptr);
+ NTSTATUS status = nt_query_information_thread(
+ thread_handle, ThreadBasicInformation, &basic_info,
+ sizeof(THREAD_BASIC_INFORMATION), nullptr);
if (status != 0)
return nullptr;
@@ -83,9 +89,11 @@ const TEB* GetThreadEnvironmentBlock(HANDLE thread_handle) {
#if defined(_WIN64)
// If the value at |pointer| points to the original stack, rewrite it to point
// to the corresponding location in the copied stack.
-void RewritePointerIfInOriginalStack(uintptr_t top, uintptr_t bottom,
- void* stack_copy, const void** pointer) {
- const uintptr_t value = reinterpret_cast<uintptr_t>(*pointer);
+void RewritePointerIfInOriginalStack(uintptr_t top,
+ uintptr_t bottom,
+ void* stack_copy,
+ const void** pointer) {
+ const auto value = reinterpret_cast<uintptr_t>(*pointer);
if (value >= bottom && value < top) {
*pointer = reinterpret_cast<const void*>(
static_cast<unsigned char*>(stack_copy) + (value - bottom));
@@ -122,23 +130,17 @@ void CopyMemoryFromStack(void* to, const void* from, size_t length)
// Note: this function must not access memory in the original stack as it may
// have been changed or deallocated by this point. This is why |top| and
// |bottom| are passed as uintptr_t.
-void RewritePointersToStackMemory(uintptr_t top, uintptr_t bottom,
- CONTEXT* context, void* stack_copy) {
+void RewritePointersToStackMemory(uintptr_t top,
+ uintptr_t bottom,
+ CONTEXT* context,
+ void* stack_copy) {
#if defined(_WIN64)
- DWORD64 CONTEXT::* const nonvolatile_registers[] = {
- &CONTEXT::R12,
- &CONTEXT::R13,
- &CONTEXT::R14,
- &CONTEXT::R15,
- &CONTEXT::Rdi,
- &CONTEXT::Rsi,
- &CONTEXT::Rbx,
- &CONTEXT::Rbp,
- &CONTEXT::Rsp
- };
+ DWORD64 CONTEXT::*const nonvolatile_registers[] = {
+ &CONTEXT::R12, &CONTEXT::R13, &CONTEXT::R14, &CONTEXT::R15, &CONTEXT::Rdi,
+ &CONTEXT::Rsi, &CONTEXT::Rbx, &CONTEXT::Rbp, &CONTEXT::Rsp};
// Rewrite pointers in the context.
- for (size_t i = 0; i < arraysize(nonvolatile_registers); ++i) {
+ for (size_t i = 0; i < size(nonvolatile_registers); ++i) {
DWORD64* const reg = &(context->*nonvolatile_registers[i]);
RewritePointerIfInOriginalStack(top, bottom, stack_copy,
reinterpret_cast<const void**>(reg));
@@ -159,8 +161,7 @@ struct RecordedFrame {
RecordedFrame(RecordedFrame&& other)
: instruction_pointer(other.instruction_pointer),
- module(std::move(other.module)) {
- }
+ module(std::move(other.module)) {}
RecordedFrame& operator=(RecordedFrame&& other) {
instruction_pointer = other.instruction_pointer;
@@ -220,14 +221,14 @@ std::string GetBuildIDForModule(HMODULE module_handle) {
DWORD age;
win::PEImage(module_handle).GetDebugId(&guid, &age, /* pdb_file= */ nullptr);
const int kGUIDSize = 39;
- std::wstring build_id;
+ string16 build_id;
int result =
::StringFromGUID2(guid, WriteInto(&build_id, kGUIDSize), kGUIDSize);
if (result != kGUIDSize)
return std::string();
RemoveChars(build_id, L"{}-", &build_id);
build_id += StringPrintf(L"%d", age);
- return WideToUTF8(build_id);
+ return UTF16ToUTF8(build_id);
}
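
A standalone illustration (made-up GUID and age) of the id format GetBuildIDForModule produces, using plain std::string instead of the base string utilities:

#include <string>

std::string ExampleBuildId() {
  // StringFromGUID2 yields the GUID in registry form; the code above strips
  // "{}-" and appends the age in decimal.
  const std::string guid = "{4C4C4544-0000-1020-8033-C2C04F4E5331}";
  const int age = 2;
  std::string build_id;
  for (char c : guid) {
    if (c != '{' && c != '}' && c != '-')
      build_id += c;
  }
  build_id += std::to_string(age);
  return build_id;  // "4C4C4544000010208033C2C04F4E53312"
}
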
// ScopedDisablePriorityBoost -------------------------------------------------
@@ -315,8 +316,7 @@ ScopedSuspendThread::~ScopedSuspendThread() {
bool PointsToGuardPage(uintptr_t stack_pointer) {
MEMORY_BASIC_INFORMATION memory_info;
SIZE_T result = ::VirtualQuery(reinterpret_cast<LPCVOID>(stack_pointer),
- &memory_info,
- sizeof(memory_info));
+ &memory_info, sizeof(memory_info));
return result != 0 && (memory_info.Protect & PAGE_GUARD);
}
@@ -333,8 +333,7 @@ void SuspendThreadAndRecordStack(
void* stack_copy_buffer,
size_t stack_copy_buffer_size,
std::vector<RecordedFrame>* stack,
- NativeStackSampler::AnnotateCallback annotator,
- StackSamplingProfiler::Sample* sample,
+ ProfileBuilder* profile_builder,
NativeStackSamplerTestDelegate* test_delegate) {
DCHECK(stack->empty());
@@ -343,7 +342,7 @@ void SuspendThreadAndRecordStack(
// The stack bounds are saved to uintptr_ts for use outside
// ScopedSuspendThread, as the thread's memory is not safe to dereference
// beyond that point.
- const uintptr_t top = reinterpret_cast<uintptr_t>(base_address);
+ const auto top = reinterpret_cast<uintptr_t>(base_address);
uintptr_t bottom = 0u;
{
@@ -369,7 +368,7 @@ void SuspendThreadAndRecordStack(
if (PointsToGuardPage(bottom))
return;
- (*annotator)(sample);
+ profile_builder->RecordAnnotations();
CopyMemoryFromStack(stack_copy_buffer,
reinterpret_cast<const void*>(bottom), top - bottom);
@@ -388,163 +387,126 @@ void SuspendThreadAndRecordStack(
class NativeStackSamplerWin : public NativeStackSampler {
public:
NativeStackSamplerWin(win::ScopedHandle thread_handle,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
~NativeStackSamplerWin() override;
// StackSamplingProfiler::NativeStackSampler:
- void ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) override;
- void RecordStackSample(StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) override;
- void ProfileRecordingStopped(StackBuffer* stack_buffer) override;
+ void ProfileRecordingStarting() override;
+ std::vector<InternalFrame> RecordStackFrames(
+ StackBuffer* stack_buffer,
+ ProfileBuilder* profile_builder) override;
private:
// Attempts to query the module filename, base address, and id for
- // |module_handle|, and store them in |module|. Returns true if it succeeded.
- static bool GetModuleForHandle(HMODULE module_handle,
- StackSamplingProfiler::Module* module);
-
- // Gets the index for the Module corresponding to |module_handle| in
- // |modules|, adding it if it's not already present. Returns
- // StackSamplingProfiler::Frame::kUnknownModuleIndex if no Module can be
- // determined for |module|.
- size_t GetModuleIndex(HMODULE module_handle,
- std::vector<StackSamplingProfiler::Module>* modules);
-
- // Copies the information represented by |stack| into |sample| and |modules|.
- void CopyToSample(const std::vector<RecordedFrame>& stack,
- StackSamplingProfiler::Sample* sample,
- std::vector<StackSamplingProfiler::Module>* modules);
+ // |module_handle|, and returns them in an InternalModule object.
+ static InternalModule GetModuleForHandle(HMODULE module_handle);
- win::ScopedHandle thread_handle_;
+ // Creates a set of internal frames with the information represented by
+ // |stack|.
+ std::vector<InternalFrame> CreateInternalFrames(
+ const std::vector<RecordedFrame>& stack);
- const AnnotateCallback annotator_;
+ win::ScopedHandle thread_handle_;
NativeStackSamplerTestDelegate* const test_delegate_;
// The stack base address corresponding to |thread_handle_|.
const void* const thread_stack_base_address_;
- // Weak. Points to the modules associated with the profile being recorded
- // between ProfileRecordingStarting() and ProfileRecordingStopped().
- std::vector<StackSamplingProfiler::Module>* current_modules_;
-
- // Maps a module handle to the corresponding Module's index within
- // current_modules_.
- std::map<HMODULE, size_t> profile_module_index_;
+ // The internal module objects, indexed by the module handle.
+ std::map<HMODULE, InternalModule> module_cache_;
DISALLOW_COPY_AND_ASSIGN(NativeStackSamplerWin);
};
NativeStackSamplerWin::NativeStackSamplerWin(
win::ScopedHandle thread_handle,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate)
: thread_handle_(thread_handle.Take()),
- annotator_(annotator),
test_delegate_(test_delegate),
thread_stack_base_address_(
- GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase) {
- DCHECK(annotator_);
-}
+ GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase) {}
-NativeStackSamplerWin::~NativeStackSamplerWin() {
-}
+NativeStackSamplerWin::~NativeStackSamplerWin() {}
-void NativeStackSamplerWin::ProfileRecordingStarting(
- std::vector<StackSamplingProfiler::Module>* modules) {
- current_modules_ = modules;
- profile_module_index_.clear();
+void NativeStackSamplerWin::ProfileRecordingStarting() {
+ module_cache_.clear();
}
-void NativeStackSamplerWin::RecordStackSample(
+std::vector<InternalFrame> NativeStackSamplerWin::RecordStackFrames(
StackBuffer* stack_buffer,
- StackSamplingProfiler::Sample* sample) {
+ ProfileBuilder* profile_builder) {
DCHECK(stack_buffer);
- DCHECK(current_modules_);
std::vector<RecordedFrame> stack;
SuspendThreadAndRecordStack(thread_handle_.Get(), thread_stack_base_address_,
stack_buffer->buffer(), stack_buffer->size(),
- &stack, annotator_, sample, test_delegate_);
- CopyToSample(stack, sample, current_modules_);
-}
+ &stack, profile_builder, test_delegate_);
-void NativeStackSamplerWin::ProfileRecordingStopped(StackBuffer* stack_buffer) {
- current_modules_ = nullptr;
+ return CreateInternalFrames(stack);
}
// static
-bool NativeStackSamplerWin::GetModuleForHandle(
- HMODULE module_handle,
- StackSamplingProfiler::Module* module) {
+InternalModule NativeStackSamplerWin::GetModuleForHandle(
+ HMODULE module_handle) {
wchar_t module_name[MAX_PATH];
DWORD result_length =
- GetModuleFileName(module_handle, module_name, arraysize(module_name));
+ ::GetModuleFileName(module_handle, module_name, size(module_name));
if (result_length == 0)
- return false;
+ return InternalModule();
- module->filename = base::FilePath(module_name);
+ const std::string& module_id = GetBuildIDForModule(module_handle);
+ if (module_id.empty())
+ return InternalModule();
- module->base_address = reinterpret_cast<uintptr_t>(module_handle);
+ return InternalModule(reinterpret_cast<uintptr_t>(module_handle), module_id,
+ FilePath(module_name));
+}
- module->id = GetBuildIDForModule(module_handle);
- if (module->id.empty())
- return false;
+std::vector<InternalFrame> NativeStackSamplerWin::CreateInternalFrames(
+ const std::vector<RecordedFrame>& stack) {
+ std::vector<InternalFrame> internal_frames;
+ internal_frames.reserve(stack.size());
- return true;
-}
+ for (const auto& frame : stack) {
+ auto frame_ip = reinterpret_cast<uintptr_t>(frame.instruction_pointer);
-size_t NativeStackSamplerWin::GetModuleIndex(
- HMODULE module_handle,
- std::vector<StackSamplingProfiler::Module>* modules) {
- if (!module_handle)
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
-
- auto loc = profile_module_index_.find(module_handle);
- if (loc == profile_module_index_.end()) {
- StackSamplingProfiler::Module module;
- if (!GetModuleForHandle(module_handle, &module))
- return StackSamplingProfiler::Frame::kUnknownModuleIndex;
- modules->push_back(module);
- loc = profile_module_index_.insert(std::make_pair(
- module_handle, modules->size() - 1)).first;
- }
+ HMODULE module_handle = frame.module.Get();
+ if (!module_handle) {
+ internal_frames.emplace_back(frame_ip, InternalModule());
+ continue;
+ }
- return loc->second;
-}
+ auto loc = module_cache_.find(module_handle);
+ if (loc != module_cache_.end()) {
+ internal_frames.emplace_back(frame_ip, loc->second);
+ continue;
+ }
-void NativeStackSamplerWin::CopyToSample(
- const std::vector<RecordedFrame>& stack,
- StackSamplingProfiler::Sample* sample,
- std::vector<StackSamplingProfiler::Module>* modules) {
- sample->frames.clear();
- sample->frames.reserve(stack.size());
-
- for (const RecordedFrame& frame : stack) {
- sample->frames.push_back(StackSamplingProfiler::Frame(
- reinterpret_cast<uintptr_t>(frame.instruction_pointer),
- GetModuleIndex(frame.module.Get(), modules)));
+ InternalModule internal_module = GetModuleForHandle(module_handle);
+ if (internal_module.is_valid)
+ module_cache_.insert(std::make_pair(module_handle, internal_module));
+
+ internal_frames.emplace_back(frame_ip, std::move(internal_module));
}
+
+ return internal_frames;
}
} // namespace
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
- AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
#if _WIN64
// Get the thread's handle.
HANDLE thread_handle = ::OpenThread(
THREAD_GET_CONTEXT | THREAD_SUSPEND_RESUME | THREAD_QUERY_INFORMATION,
- FALSE,
- thread_id);
+ FALSE, thread_id);
if (thread_handle) {
return std::unique_ptr<NativeStackSampler>(new NativeStackSamplerWin(
- win::ScopedHandle(thread_handle), annotator, test_delegate));
+ win::ScopedHandle(thread_handle), test_delegate));
}
#endif
return std::unique_ptr<NativeStackSampler>();
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index a8cddf08650..02df814f8cc 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -5,7 +5,6 @@
#include "base/profiler/stack_sampling_profiler.h"
#include <algorithm>
-#include <map>
#include <utility>
#include "base/atomic_sequence_num.h"
@@ -13,7 +12,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
@@ -27,6 +25,8 @@
namespace base {
+const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
+
namespace {
// This value is used to initialize the WaitableEvent object. This MUST BE set
@@ -37,29 +37,14 @@ constexpr WaitableEvent::ResetPolicy kResetPolicy =
// This value is used when there is no collection in progress and thus no ID
// for referencing the active collection to the SamplingThread.
-const int NULL_PROFILER_ID = -1;
-
-void ChangeAtomicFlags(subtle::Atomic32* flags,
- subtle::Atomic32 set,
- subtle::Atomic32 clear) {
- DCHECK(set != 0 || clear != 0);
- DCHECK_EQ(0, set & clear);
-
- subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
- while (true) {
- subtle::Atomic32 existing =
- subtle::NoBarrier_CompareAndSwap(flags, bits, (bits | set) & ~clear);
- if (existing == bits)
- break;
- bits = existing;
- }
-}
+const int kNullProfilerId = -1;
} // namespace
// StackSamplingProfiler::Module ----------------------------------------------
StackSamplingProfiler::Module::Module() : base_address(0u) {}
+
StackSamplingProfiler::Module::Module(uintptr_t base_address,
const std::string& id,
const FilePath& filename)
@@ -67,6 +52,17 @@ StackSamplingProfiler::Module::Module(uintptr_t base_address,
StackSamplingProfiler::Module::~Module() = default;
+// StackSamplingProfiler::InternalModule --------------------------------------
+
+StackSamplingProfiler::InternalModule::InternalModule() : is_valid(false) {}
+
+StackSamplingProfiler::InternalModule::InternalModule(uintptr_t base_address,
+ const std::string& id,
+ const FilePath& filename)
+ : base_address(base_address), id(id), filename(filename), is_valid(true) {}
+
+StackSamplingProfiler::InternalModule::~InternalModule() = default;
+
// StackSamplingProfiler::Frame -----------------------------------------------
StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
@@ -76,8 +72,17 @@ StackSamplingProfiler::Frame::Frame(uintptr_t instruction_pointer,
StackSamplingProfiler::Frame::~Frame() = default;
StackSamplingProfiler::Frame::Frame()
- : instruction_pointer(0), module_index(kUnknownModuleIndex) {
-}
+ : instruction_pointer(0), module_index(kUnknownModuleIndex) {}
+
+// StackSamplingProfiler::InternalFrame -------------------------------------
+
+StackSamplingProfiler::InternalFrame::InternalFrame(
+ uintptr_t instruction_pointer,
+ InternalModule internal_module)
+ : instruction_pointer(instruction_pointer),
+ internal_module(std::move(internal_module)) {}
+
+StackSamplingProfiler::InternalFrame::~InternalFrame() = default;
// StackSamplingProfiler::Sample ----------------------------------------------
@@ -145,61 +150,58 @@ class StackSamplingProfiler::SamplingThread : public Thread {
};
struct CollectionContext {
- CollectionContext(int profiler_id,
- PlatformThreadId target,
+ CollectionContext(PlatformThreadId target,
const SamplingParams& params,
- const CompletedCallback& callback,
WaitableEvent* finished,
- std::unique_ptr<NativeStackSampler> sampler)
- : profiler_id(profiler_id),
+ std::unique_ptr<NativeStackSampler> sampler,
+ std::unique_ptr<ProfileBuilder> profile_builder)
+ : collection_id(next_collection_id.GetNext()),
target(target),
params(params),
- callback(callback),
finished(finished),
- native_sampler(std::move(sampler)) {}
+ native_sampler(std::move(sampler)),
+ profile_builder(std::move(profile_builder)) {}
~CollectionContext() = default;
- // An identifier for the profiler associated with this collection, used to
- // uniquely identify the collection to outside interests.
- const int profiler_id;
+ // An identifier for this collection, used to uniquely identify the
+ // collection to outside interests.
+ const int collection_id;
- const PlatformThreadId target; // ID of The thread being sampled.
- const SamplingParams params; // Information about how to sample.
- const CompletedCallback callback; // Callback made when sampling complete.
- WaitableEvent* const finished; // Signaled when all sampling complete.
+ const PlatformThreadId target; // ID of The thread being sampled.
+ const SamplingParams params; // Information about how to sample.
+ WaitableEvent* const finished; // Signaled when all sampling complete.
// Platform-specific module that does the actual sampling.
std::unique_ptr<NativeStackSampler> native_sampler;
+ // Receives the sampling data and builds a CallStackProfile.
+ std::unique_ptr<ProfileBuilder> profile_builder;
+
// The absolute time for the next sample.
Time next_sample_time;
// The time that a profile was started, for calculating the total duration.
Time profile_start_time;
- // Counters that indicate the current position along the acquisition.
- int burst = 0;
- int sample = 0;
-
- // The collected stack samples. The active profile is always at the back().
- CallStackProfiles profiles;
+ // Counter that indicates the current sample position along the acquisition.
+ int sample_count = 0;
- // Sequence number for generating new profiler ids.
- static AtomicSequenceNumber next_profiler_id;
+ // Sequence number for generating new collection ids.
+ static AtomicSequenceNumber next_collection_id;
};
// Gets the single instance of this class.
static SamplingThread* GetInstance();
// Adds a new CollectionContext to the thread. This can be called externally
- // from any thread. This returns an ID that can later be used to stop
- // the sampling.
+ // from any thread. This returns a collection id that can later be used to
+ // stop the sampling.
int Add(std::unique_ptr<CollectionContext> collection);
- // Removes an active collection based on its ID, forcing it to run its
- // callback if any data has been collected. This can be called externally
+ // Removes an active collection based on its collection id, forcing it to run
+ // its callback if any data has been collected. This can be called externally
// from any thread.
- void Remove(int id);
+ void Remove(int collection_id);
private:
friend class TestAPI;
@@ -235,28 +237,21 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// Get task runner that is usable from the sampling thread itself.
scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
- // Finishes a collection and reports collected data via callback. The
- // collection's |finished| waitable event will be signalled. The |collection|
- // should already have been removed from |active_collections_| by the caller,
- // as this is needed to avoid flakyness in unit tests.
+ // Finishes a collection. The collection's |finished| waitable event will be
+ // signalled. The |collection| should already have been removed from
+ // |active_collections_| by the caller, as this is needed to avoid flakiness
+ // in unit tests.
void FinishCollection(CollectionContext* collection);
- // Records a single sample of a collection.
- void RecordSample(CollectionContext* collection);
-
// Check if the sampling thread is idle and begin a shutdown if it is.
void ScheduleShutdownIfIdle();
// These methods are tasks that get posted to the internal message queue.
void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
- void RemoveCollectionTask(int id);
- void PerformCollectionTask(int id);
+ void RemoveCollectionTask(int collection_id);
+ void RecordSampleTask(int collection_id);
void ShutdownTask(int add_events);
- // Updates the |next_sample_time| time based on configured parameters.
- // Returns true if there is a next sample or false if sampling is complete.
- bool UpdateNextSampleTime(CollectionContext* collection);
-
// Thread:
void CleanUp() override;
@@ -265,10 +260,10 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// that take it are not called concurrently.
std::unique_ptr<NativeStackSampler::StackBuffer> stack_buffer_;
- // A map of IDs to collection contexts. Because this class is a singleton
- // that is never destroyed, context objects will never be destructed except
- // by explicit action. Thus, it's acceptable to pass unretained pointers
- // to these objects when posting tasks.
+ // A map of collection ids to collection contexts. Because this class is a
+ // singleton that is never destroyed, context objects will never be destructed
+ // except by explicit action. Thus, it's acceptable to pass unretained
+ // pointers to these objects when posting tasks.
std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
// State maintained about the current execution (or non-execution) of
@@ -285,7 +280,7 @@ class StackSamplingProfiler::SamplingThread : public Thread {
// A counter that notes adds of new collection requests. It is incremented
// when changes occur so that delayed shutdown tasks are able to detect if
- // samething new has happened while it was waiting. Like all "execution_state"
+ // something new has happened while it was waiting. Like all "execution_state"
// vars, this must be accessed while holding |thread_execution_state_lock_|.
int thread_execution_state_add_events_ = 0;
@@ -371,8 +366,8 @@ void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
event->Signal();
}
-AtomicSequenceNumber
- StackSamplingProfiler::SamplingThread::CollectionContext::next_profiler_id;
+AtomicSequenceNumber StackSamplingProfiler::SamplingThread::CollectionContext::
+ next_collection_id;
StackSamplingProfiler::SamplingThread::SamplingThread()
: Thread("StackSamplingProfiler") {}
@@ -388,7 +383,7 @@ int StackSamplingProfiler::SamplingThread::Add(
std::unique_ptr<CollectionContext> collection) {
// This is not to be run on the sampling thread.
- int id = collection->profiler_id;
+ int collection_id = collection->collection_id;
scoped_refptr<SingleThreadTaskRunner> task_runner =
GetOrCreateTaskRunnerForAdd();
@@ -396,10 +391,10 @@ int StackSamplingProfiler::SamplingThread::Add(
FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
std::move(collection)));
- return id;
+ return collection_id;
}
-void StackSamplingProfiler::SamplingThread::Remove(int id) {
+void StackSamplingProfiler::SamplingThread::Remove(int collection_id) {
// This is not to be run on the sampling thread.
ThreadExecutionState state;
@@ -411,9 +406,9 @@ void StackSamplingProfiler::SamplingThread::Remove(int id) {
// This can fail if the thread were to exit between acquisition of the task
// runner above and the call below. In that case, however, everything has
// stopped so there's no need to try to stop it.
- task_runner->PostTask(
- FROM_HERE,
- BindOnce(&SamplingThread::RemoveCollectionTask, Unretained(this), id));
+ task_runner->PostTask(FROM_HERE,
+ BindOnce(&SamplingThread::RemoveCollectionTask,
+ Unretained(this), collection_id));
}
scoped_refptr<SingleThreadTaskRunner>
@@ -496,61 +491,18 @@ StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
void StackSamplingProfiler::SamplingThread::FinishCollection(
CollectionContext* collection) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- DCHECK_EQ(0u, active_collections_.count(collection->profiler_id));
-
- // If there is no duration for the final profile (because it was stopped),
- // calculate it now.
- if (!collection->profiles.empty() &&
- collection->profiles.back().profile_duration == TimeDelta()) {
- collection->profiles.back().profile_duration =
- Time::Now() - collection->profile_start_time +
- collection->params.sampling_interval;
- }
+ DCHECK_EQ(0u, active_collections_.count(collection->collection_id));
- // Extract some information so callback and event-signalling can still be
- // done after the collection has been removed from the list of "active" ones.
- // This allows the the controlling object (and tests using it) to be confident
- // that collection is fully finished when those things occur.
- const CompletedCallback callback = collection->callback;
- CallStackProfiles profiles = std::move(collection->profiles);
- WaitableEvent* finished = collection->finished;
+ TimeDelta profile_duration = Time::Now() - collection->profile_start_time +
+ collection->params.sampling_interval;
- // Run the associated callback, passing the collected profiles.
- callback.Run(std::move(profiles));
+ collection->profile_builder->OnProfileCompleted(
+ profile_duration, collection->params.sampling_interval);
// Signal that this collection is finished.
- finished->Signal();
-}
+ collection->finished->Signal();
-void StackSamplingProfiler::SamplingThread::RecordSample(
- CollectionContext* collection) {
- DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- DCHECK(collection->native_sampler);
-
- // If this is the first sample of a burst, a new Profile needs to be created
- // and filled.
- if (collection->sample == 0) {
- collection->profiles.push_back(CallStackProfile());
- CallStackProfile& profile = collection->profiles.back();
- profile.sampling_period = collection->params.sampling_interval;
- collection->profile_start_time = Time::Now();
- collection->native_sampler->ProfileRecordingStarting(&profile.modules);
- }
-
- // The currently active profile being captured.
- CallStackProfile& profile = collection->profiles.back();
-
- // Record a single sample.
- profile.samples.push_back(Sample());
- collection->native_sampler->RecordStackSample(stack_buffer_.get(),
- &profile.samples.back());
-
- // If this is the last sample of a burst, record the total time.
- if (collection->sample == collection->params.samples_per_burst - 1) {
- profile.profile_duration = Time::Now() - collection->profile_start_time +
- collection->params.sampling_interval;
- collection->native_sampler->ProfileRecordingStopped(stack_buffer_.get());
- }
+ ScheduleShutdownIfIdle();
}
void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
@@ -577,16 +529,16 @@ void StackSamplingProfiler::SamplingThread::AddCollectionTask(
std::unique_ptr<CollectionContext> collection) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- const int profiler_id = collection->profiler_id;
+ const int collection_id = collection->collection_id;
const TimeDelta initial_delay = collection->params.initial_delay;
active_collections_.insert(
- std::make_pair(profiler_id, std::move(collection)));
+ std::make_pair(collection_id, std::move(collection)));
GetTaskRunnerOnSamplingThread()->PostDelayedTask(
FROM_HERE,
- BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this),
- profiler_id),
+ BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
+ collection_id),
initial_delay);
// Another increment of "add events" serves to invalidate any pending
@@ -598,26 +550,27 @@ void StackSamplingProfiler::SamplingThread::AddCollectionTask(
}
}
-void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
+void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(
+ int collection_id) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- auto found = active_collections_.find(id);
+ auto found = active_collections_.find(collection_id);
if (found == active_collections_.end())
return;
// Remove |collection| from |active_collections_|.
std::unique_ptr<CollectionContext> collection = std::move(found->second);
- size_t count = active_collections_.erase(id);
+ size_t count = active_collections_.erase(collection_id);
DCHECK_EQ(1U, count);
FinishCollection(collection.get());
- ScheduleShutdownIfIdle();
}
-void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
+void StackSamplingProfiler::SamplingThread::RecordSampleTask(
+ int collection_id) {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
- auto found = active_collections_.find(id);
+ auto found = active_collections_.find(collection_id);
// The task won't be found if it has been stopped.
if (found == active_collections_.end())
@@ -625,34 +578,43 @@ void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
CollectionContext* collection = found->second.get();
- // Handle first-run with no "next time".
- if (collection->next_sample_time == Time())
+  // If this is the first sample, initialize the collection's timing state
+  // and notify the native sampler that recording is starting.
+ if (collection->sample_count == 0) {
+ collection->profile_start_time = Time::Now();
collection->next_sample_time = Time::Now();
+ collection->native_sampler->ProfileRecordingStarting();
+ }
- // Do the collection of a single sample.
- RecordSample(collection);
-
- // Update the time of the next sample recording.
- const bool collection_finished = !UpdateNextSampleTime(collection);
- if (!collection_finished) {
+ // Record a single sample.
+ collection->profile_builder->OnSampleCompleted(
+ collection->native_sampler->RecordStackFrames(
+ stack_buffer_.get(), collection->profile_builder.get()));
+
+ // Schedule the next sample recording if there is one.
+ if (++collection->sample_count < collection->params.samples_per_profile) {
+    // This keeps a consistent average interval between samples, but if the
+    // interval is smaller than the time it takes to actually acquire a sample
+    // it results in a constant series of acquisitions that nearly locks out
+    // the target thread. Anything sampling that quickly is going to be a
+    // problem anyway, so don't worry about it.
+ collection->next_sample_time += collection->params.sampling_interval;
bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
FROM_HERE,
- BindOnce(&SamplingThread::PerformCollectionTask, Unretained(this), id),
+ BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
+ collection_id),
std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
DCHECK(success);
return;
}
- // Take ownership of |collection| and remove it from the map. If collection is
- // to be restarted, a new collection task will be added below.
+ // Take ownership of |collection| and remove it from the map.
std::unique_ptr<CollectionContext> owned_collection =
std::move(found->second);
- size_t count = active_collections_.erase(id);
+ size_t count = active_collections_.erase(collection_id);
DCHECK_EQ(1U, count);
// All capturing has completed so finish the collection.
FinishCollection(collection);
- ScheduleShutdownIfIdle();
}
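
The rescheduling above keeps the average rate stable by advancing the schedule a fixed interval at a time and clamping the posted delay at zero, rather than recomputing the schedule from Time::Now(). A minimal sketch of that policy, with illustrative names that are not part of the patch:

#include <algorithm>

#include "base/time/time.h"

// Sketch only: advancing |*next_sample_time| by a whole interval and clamping
// the delay at zero keeps the long-run average interval at |interval| even if
// an individual sample overruns; recomputing the schedule from Time::Now()
// instead would let every overrun push all later samples back, so the
// effective rate would drift below the requested one.
base::TimeDelta NextSampleDelay(base::Time* next_sample_time,
                                base::TimeDelta interval) {
  *next_sample_time += interval;
  return std::max(*next_sample_time - base::Time::Now(), base::TimeDelta());
}
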
void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
@@ -689,27 +651,6 @@ void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
stack_buffer_.reset();
}
-bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
- CollectionContext* collection) {
- // This will keep a consistent average interval between samples but will
- // result in constant series of acquisitions, thus nearly locking out the
- // target thread, if the interval is smaller than the time it takes to
- // actually acquire the sample. Anything sampling that quickly is going
- // to be a problem anyway so don't worry about it.
- if (++collection->sample < collection->params.samples_per_burst) {
- collection->next_sample_time += collection->params.sampling_interval;
- return true;
- }
-
- if (++collection->burst < collection->params.bursts) {
- collection->sample = 0;
- collection->next_sample_time += collection->params.burst_interval;
- return true;
- }
-
- return false;
-}
-
void StackSamplingProfiler::SamplingThread::CleanUp() {
DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
@@ -725,12 +666,6 @@ void StackSamplingProfiler::SamplingThread::CleanUp() {
// static
void StackSamplingProfiler::TestAPI::Reset() {
SamplingThread::TestAPI::Reset();
- ResetAnnotations();
-}
-
-// static
-void StackSamplingProfiler::TestAPI::ResetAnnotations() {
- subtle::NoBarrier_Store(&process_milestones_, 0u);
}
// static
@@ -749,30 +684,30 @@ void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
}
-subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
-
StackSamplingProfiler::StackSamplingProfiler(
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate)
- : StackSamplingProfiler(base::PlatformThread::CurrentId(),
+ : StackSamplingProfiler(PlatformThread::CurrentId(),
params,
- callback,
+ std::move(profile_builder),
test_delegate) {}
StackSamplingProfiler::StackSamplingProfiler(
PlatformThreadId thread_id,
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate)
: thread_id_(thread_id),
params_(params),
- completed_callback_(callback),
+ profile_builder_(std::move(profile_builder)),
      // The event starts "signaled" so code knows it's safe to start the
      // thread and "manual" so that it can be waited on in multiple places.
profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
- profiler_id_(NULL_PROFILER_ID),
- test_delegate_(test_delegate) {}
+ profiler_id_(kNullProfilerId),
+ test_delegate_(test_delegate) {
+ DCHECK(profile_builder_);
+}
StackSamplingProfiler::~StackSamplingProfiler() {
// Stop returns immediately but the shutdown runs asynchronously. There is a
@@ -794,12 +729,13 @@ StackSamplingProfiler::~StackSamplingProfiler() {
}
void StackSamplingProfiler::Start() {
- if (completed_callback_.is_null())
- return;
+  // Multiple calls to Start() on a single StackSamplingProfiler object are
+  // not allowed. If profile_builder_ is nullptr, Start() has already been
+  // called.
+ DCHECK(profile_builder_);
std::unique_ptr<NativeStackSampler> native_sampler =
- NativeStackSampler::Create(thread_id_, &RecordAnnotations,
- test_delegate_);
+ NativeStackSampler::Create(thread_id_, test_delegate_);
if (!native_sampler)
return;
@@ -816,34 +752,17 @@ void StackSamplingProfiler::Start() {
profiling_inactive_.Wait();
profiling_inactive_.Reset();
- DCHECK_EQ(NULL_PROFILER_ID, profiler_id_);
+ DCHECK_EQ(kNullProfilerId, profiler_id_);
profiler_id_ = SamplingThread::GetInstance()->Add(
std::make_unique<SamplingThread::CollectionContext>(
- SamplingThread::CollectionContext::next_profiler_id.GetNext(),
- thread_id_, params_, completed_callback_, &profiling_inactive_,
- std::move(native_sampler)));
- DCHECK_NE(NULL_PROFILER_ID, profiler_id_);
+ thread_id_, params_, &profiling_inactive_, std::move(native_sampler),
+ std::move(profile_builder_)));
+ DCHECK_NE(kNullProfilerId, profiler_id_);
}
void StackSamplingProfiler::Stop() {
SamplingThread::GetInstance()->Remove(profiler_id_);
- profiler_id_ = NULL_PROFILER_ID;
-}
-
-// static
-void StackSamplingProfiler::SetProcessMilestone(int milestone) {
- DCHECK_LE(0, milestone);
- DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
- DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
- ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
-}
-
-// static
-void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
- // The code inside this method must not do anything that could acquire a
- // mutex, including allocating memory (which includes LOG messages) because
- // that mutex could be held by a stopped thread, thus resulting in deadlock.
- sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_);
+ profiler_id_ = kNullProfilerId;
}
// StackSamplingProfiler::Frame global functions ------------------------------
@@ -851,7 +770,7 @@ void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
bool operator==(const StackSamplingProfiler::Module& a,
const StackSamplingProfiler::Module& b) {
return a.base_address == b.base_address && a.id == b.id &&
- a.filename == b.filename;
+ a.filename == b.filename;
}
bool operator==(const StackSamplingProfiler::Sample& a,
@@ -866,25 +785,24 @@ bool operator!=(const StackSamplingProfiler::Sample& a,
bool operator<(const StackSamplingProfiler::Sample& a,
const StackSamplingProfiler::Sample& b) {
- if (a.process_milestones < b.process_milestones)
- return true;
- if (a.process_milestones > b.process_milestones)
- return false;
+ if (a.process_milestones != b.process_milestones)
+ return a.process_milestones < b.process_milestones;
return a.frames < b.frames;
}
-bool operator==(const StackSamplingProfiler::Frame &a,
- const StackSamplingProfiler::Frame &b) {
+bool operator==(const StackSamplingProfiler::Frame& a,
+ const StackSamplingProfiler::Frame& b) {
return a.instruction_pointer == b.instruction_pointer &&
- a.module_index == b.module_index;
+ a.module_index == b.module_index;
}
-bool operator<(const StackSamplingProfiler::Frame &a,
- const StackSamplingProfiler::Frame &b) {
- return (a.module_index < b.module_index) ||
- (a.module_index == b.module_index &&
- a.instruction_pointer < b.instruction_pointer);
+bool operator<(const StackSamplingProfiler::Frame& a,
+ const StackSamplingProfiler::Frame& b) {
+ if (a.module_index != b.module_index)
+ return a.module_index < b.module_index;
+
+ return a.instruction_pointer < b.instruction_pointer;
}
} // namespace base
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index 2f9ade55eea..e43349a8fe0 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -7,13 +7,12 @@
#include <stddef.h>
+#include <map>
#include <memory>
#include <string>
#include <vector>
-#include "base/atomicops.h"
#include "base/base_export.h"
-#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/strings/string16.h"
@@ -23,7 +22,9 @@
namespace base {
-class NativeStackSampler;
+// Identifies an unknown module.
+BASE_EXPORT extern const size_t kUnknownModuleIndex;
+
class NativeStackSamplerTestDelegate;
// StackSamplingProfiler periodically stops a thread to sample its stack, for
@@ -35,34 +36,24 @@ class NativeStackSamplerTestDelegate;
//
// // Create and customize params as desired.
//   base::StackSamplingProfiler::SamplingParams params;
-// // Any thread's ID may be passed as the target.
-// base::StackSamplingProfiler profiler(base::PlatformThread::CurrentId()),
-// params);
//
-// // Or, to process the profiles within Chrome rather than via UMA, use a
-// // custom completed callback:
-// base::StackStackSamplingProfiler::CompletedCallback
-// thread_safe_callback = ...;
+// // To process the profiles, use a custom ProfileBuilder subclass:
+// class SubProfileBuilder :
+//       public base::StackSamplingProfiler::ProfileBuilder {...};
//   base::StackSamplingProfiler profiler(base::PlatformThread::CurrentId(),
-// params, thread_safe_callback);
+// params, std::make_unique<SubProfileBuilder>(...));
//
// profiler.Start();
// // ... work being done on the target thread here ...
// profiler.Stop(); // optional, stops collection before complete per params
//
-// The default SamplingParams causes stacks to be recorded in a single burst at
-// a 10Hz interval for a total of 30 seconds. All of these parameters may be
+// The default SamplingParams causes stacks to be recorded in a single profile
+// at a 10Hz interval for a total of 30 seconds. All of these parameters may be
// altered as desired.
//
-// When all call stack profiles are complete, or the profiler is stopped, the
-// completed callback is called from a thread created by the profiler with the
-// collected profiles.
-//
-// The results of the profiling are passed to the completed callback and consist
-// of a vector of CallStackProfiles. Each CallStackProfile corresponds to a
-// burst as specified in SamplingParams and contains a set of Samples and
-// Modules. One Sample corresponds to a single recorded stack, and the Modules
-// record those modules associated with the recorded stack frames.
+// When a call stack profile is complete, or the profiler is stopped,
+// ProfileBuilder's OnProfileCompleted function is called from a thread created
+// by the profiler.
class BASE_EXPORT StackSamplingProfiler {
public:
// Module represents the module (DLL or exe) corresponding to a stack frame.
@@ -89,11 +80,39 @@ class BASE_EXPORT StackSamplingProfiler {
FilePath filename;
};
+  // InternalModule represents the module (DLL or exe) and its validity state.
+  // Unlike Module, it has an additional field, "is_valid".
+ //
+ // This struct is only used for sampling data transfer from NativeStackSampler
+ // to ProfileBuilder.
+ struct BASE_EXPORT InternalModule {
+ InternalModule();
+ InternalModule(uintptr_t base_address,
+ const std::string& id,
+ const FilePath& filename);
+ ~InternalModule();
+
+ // Points to the base address of the module.
+ uintptr_t base_address;
+
+ // An opaque binary string that uniquely identifies a particular program
+ // version with high probability. This is parsed from headers of the loaded
+ // module.
+ // For binaries generated by GNU tools:
+ // Contents of the .note.gnu.build-id field.
+ // On Windows:
+ // GUID + AGE in the debug image headers of a module.
+ std::string id;
+
+ // The filename of the module.
+ FilePath filename;
+
+    // Whether the module is valid.
+ bool is_valid;
+ };
+
// Frame represents an individual sampled stack frame with module information.
struct BASE_EXPORT Frame {
- // Identifies an unknown module.
- static const size_t kUnknownModuleIndex = static_cast<size_t>(-1);
-
Frame(uintptr_t instruction_pointer, size_t module_index);
~Frame();
@@ -108,6 +127,23 @@ class BASE_EXPORT StackSamplingProfiler {
size_t module_index;
};
+  // InternalFrame represents an individual sampled stack frame with full
+  // module information, unlike Frame, which carries only a module index.
+ //
+ // This struct is only used for sampling data transfer from NativeStackSampler
+ // to ProfileBuilder.
+ struct BASE_EXPORT InternalFrame {
+ InternalFrame(uintptr_t instruction_pointer,
+ InternalModule internal_module);
+ ~InternalFrame();
+
+ // The sampled instruction pointer within the function.
+ uintptr_t instruction_pointer;
+
+ // The module information.
+ InternalModule internal_module;
+ };
+
// Sample represents a set of stack frames with some extra information.
struct BASE_EXPORT Sample {
Sample();
@@ -157,24 +193,15 @@ class BASE_EXPORT StackSamplingProfiler {
DISALLOW_ASSIGN(CallStackProfile);
};
- using CallStackProfiles = std::vector<CallStackProfile>;
-
// Represents parameters that configure the sampling.
struct BASE_EXPORT SamplingParams {
// Time to delay before first samples are taken.
TimeDelta initial_delay = TimeDelta::FromMilliseconds(0);
- // Number of sampling bursts to perform.
- int bursts = 1;
-
- // Interval between sampling bursts. This is the desired duration from the
- // start of one burst to the start of the next burst.
- TimeDelta burst_interval = TimeDelta::FromSeconds(10);
-
- // Number of samples to record per burst.
- int samples_per_burst = 300;
+ // Number of samples to record per profile.
+ int samples_per_profile = 300;
- // Interval between samples during a sampling burst. This is the desired
+ // Interval between samples during a sampling profile. This is the desired
// duration from the start of one sample to the start of the next sample.
TimeDelta sampling_interval = TimeDelta::FromMilliseconds(100);
};
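
As a concrete illustration of the fields above, here is the kind of configuration the tests later in this patch use; this is a sketch of typical usage rather than part of the change:

// Sketch only: a small, fast profile of three samples taken 10 ms apart,
// starting immediately. Fields left unset keep the defaults documented above.
base::StackSamplingProfiler::SamplingParams params;
params.initial_delay = base::TimeDelta::FromMilliseconds(0);
params.sampling_interval = base::TimeDelta::FromMilliseconds(10);
params.samples_per_profile = 3;
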
@@ -189,9 +216,6 @@ class BASE_EXPORT StackSamplingProfiler {
// so that tests don't inherit state from previous tests.
static void Reset();
- // Resets internal annotations (like process phase) to initial values.
- static void ResetAnnotations();
-
// Returns whether the sampling thread is currently running or not.
static bool IsSamplingThreadRunning();
@@ -211,41 +235,58 @@ class BASE_EXPORT StackSamplingProfiler {
bool simulate_intervening_start);
};
- // The callback type used to collect completed profiles. The passed |profiles|
- // are move-only. Other threads, including the UI thread, may block on
- // callback completion so this should run as quickly as possible.
- //
- // IMPORTANT NOTE: The callback is invoked on a thread the profiler
- // constructs, rather than on the thread used to construct the profiler and
- // set the callback, and thus the callback must be callable on any thread. For
- // threads with message loops that create StackSamplingProfilers, posting a
- // task to the message loop with the moved (i.e. std::move) profiles is the
- // thread-safe callback implementation.
- using CompletedCallback = Callback<void(CallStackProfiles)>;
-
- // Creates a profiler for the CURRENT thread that sends completed profiles
- // to |callback|. An optional |test_delegate| can be supplied by tests.
- // The caller must ensure that this object gets destroyed before the current
- // thread exits.
+ // The ProfileBuilder interface allows the user to record profile information
+ // on the fly in whatever format is desired. Functions are invoked by the
+ // profiler on its own thread so must not block or perform expensive
+ // operations.
+ class BASE_EXPORT ProfileBuilder {
+ public:
+ ProfileBuilder() = default;
+ virtual ~ProfileBuilder() = default;
+
+    // Records metadata to be associated with the current sample.
+ // The code implementing this method must not do anything that could acquire
+ // a mutex, including allocating memory (which includes LOG messages)
+ // because that mutex could be held by a stopped thread, thus resulting in
+ // deadlock.
+ virtual void RecordAnnotations() = 0;
+
+    // Records a new set of internal frames. Invoked each time a sample is
+    // collected.
+ virtual void OnSampleCompleted(
+ std::vector<InternalFrame> internal_frames) = 0;
+
+ // Finishes the profile construction with |profile_duration| and
+ // |sampling_period|. Invoked when sampling a profile completes.
+ virtual void OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ProfileBuilder);
+ };
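
A minimal ProfileBuilder implementation is sketched below to show how the three hooks fit together; the TestProfileBuilder in the unit test changes further down follows the same shape. The class name and members here are illustrative only, not part of the patch:

#include <utility>
#include <vector>

#include "base/profiler/stack_sampling_profiler.h"

// Sketch only: keeps the recorded frame sets in memory and remembers the
// timing reported when the profile completes.
class CountingProfileBuilder
    : public base::StackSamplingProfiler::ProfileBuilder {
 public:
  CountingProfileBuilder() = default;
  ~CountingProfileBuilder() override = default;

  // Deliberately empty: this hook must not allocate or take locks.
  void RecordAnnotations() override {}

  void OnSampleCompleted(std::vector<base::StackSamplingProfiler::InternalFrame>
                             internal_frames) override {
    frame_sets_.push_back(std::move(internal_frames));
  }

  void OnProfileCompleted(base::TimeDelta profile_duration,
                          base::TimeDelta sampling_period) override {
    profile_duration_ = profile_duration;
    sampling_period_ = sampling_period;
  }

 private:
  std::vector<std::vector<base::StackSamplingProfiler::InternalFrame>>
      frame_sets_;
  base::TimeDelta profile_duration_;
  base::TimeDelta sampling_period_;
};
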
+
+ // Creates a profiler for the CURRENT thread. An optional |test_delegate| can
+ // be supplied by tests. The caller must ensure that this object gets
+ // destroyed before the current thread exits.
StackSamplingProfiler(
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate = nullptr);
- // Creates a profiler for ANOTHER thread that sends completed profiles to
- // |callback|. An optional |test_delegate| can be supplied by tests.
+ // Creates a profiler for ANOTHER thread. An optional |test_delegate| can be
+ // supplied by tests.
//
// IMPORTANT: The caller must ensure that the thread being sampled does not
// exit before this object gets destructed or Bad Things(tm) may occur.
StackSamplingProfiler(
PlatformThreadId thread_id,
const SamplingParams& params,
- const CompletedCallback& callback,
+ std::unique_ptr<ProfileBuilder> profile_builder,
NativeStackSamplerTestDelegate* test_delegate = nullptr);
// Stops any profiling currently taking place before destroying the profiler.
- // This will block until the callback has been run if profiling has started
- // but not already finished.
+ // This will block until profile_builder_'s OnProfileCompleted function has
+ // executed if profiling has started but not already finished.
~StackSamplingProfiler();
// Initializes the profiler and starts sampling. Might block on a
@@ -254,21 +295,13 @@ class BASE_EXPORT StackSamplingProfiler {
void Start();
// Stops the profiler and any ongoing sampling. This method will return
- // immediately with the callback being run asynchronously. At most one
- // more stack sample will be taken after this method returns. Calling this
- // function is optional; if not invoked profiling terminates when all the
- // profiling bursts specified in the SamplingParams are completed or the
- // profiler object is destroyed, whichever occurs first.
+ // immediately with the profile_builder_'s OnProfileCompleted function being
+ // run asynchronously. At most one more stack sample will be taken after this
+ // method returns. Calling this function is optional; if not invoked profiling
+ // terminates when all the profiling samples specified in the SamplingParams
+ // are completed or the profiler object is destroyed, whichever occurs first.
void Stop();
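
Because Stop() returns immediately and OnProfileCompleted() runs later on the profiler's own thread, a caller that needs the finished data has to synchronize explicitly; the tests in this patch do so with a WaitableEvent signaled from the builder. A hedged sketch of that pattern, where MyProfileBuilder, |thread_id| and |params| are assumed to be defined elsewhere:

// Sketch only: wait for the asynchronous completion that Stop() itself does
// not provide. MyProfileBuilder is assumed to Signal() |done| from its
// OnProfileCompleted() override.
base::WaitableEvent done(base::WaitableEvent::ResetPolicy::MANUAL,
                         base::WaitableEvent::InitialState::NOT_SIGNALED);
base::StackSamplingProfiler profiler(
    thread_id, params, std::make_unique<MyProfileBuilder>(&done));
profiler.Start();
// ... let the target thread run ...
profiler.Stop();  // Returns immediately; at most one more sample is taken.
done.Wait();      // Blocks until OnProfileCompleted() has executed.
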
- // Set the current system state that is recorded with each captured stack
- // frame. This is thread-safe so can be called from anywhere. The parameter
- // value should be from an enumeration of the appropriate type with values
- // ranging from 0 to 31, inclusive. This sets bits within Sample field of
- // |process_milestones|. The actual meanings of these bits are defined
- // (globally) by the caller(s).
- static void SetProcessMilestone(int milestone);
-
private:
friend class TestAPI;
@@ -276,31 +309,21 @@ class BASE_EXPORT StackSamplingProfiler {
// the target thread.
class SamplingThread;
- // Adds annotations to a Sample.
- static void RecordAnnotations(Sample* sample);
-
-  // This global variable holds the current system state and is recorded with
-  // every captured sample, done on a separate thread, which is why updates to
-  // this must be atomic. A PostTask to move the updates to that thread
- // would skew the timing and a lock could result in deadlock if the thread
- // making a change was also being profiled and got stopped.
- static subtle::Atomic32 process_milestones_;
-
// The thread whose stack will be sampled.
PlatformThreadId thread_id_;
const SamplingParams params_;
- const CompletedCallback completed_callback_;
+ // Receives the sampling data and builds a CallStackProfile. The ownership of
+ // this object will be transferred to the sampling thread when thread sampling
+ // starts.
+ std::unique_ptr<ProfileBuilder> profile_builder_;
// This starts "signaled", is reset when sampling begins, and is signaled
- // when that sampling is complete and the callback done.
+ // when that sampling is complete and the profile_builder_'s
+ // OnProfileCompleted function has executed.
WaitableEvent profiling_inactive_;
- // Object that does the native sampling. This is created during construction
- // and later passed to the sampling thread when profiling is started.
- std::unique_ptr<NativeStackSampler> native_sampler_;
-
// An ID uniquely identifying this profiler to the sampling thread. This
// will be an internal "null" value when no collection has been started.
int profiler_id_;
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 8fc25c92c19..b0f883624f6 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -12,6 +12,7 @@
#include <vector>
#include "base/bind.h"
+#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/files/file_util.h"
#include "base/macros.h"
@@ -22,10 +23,12 @@
#include "base/profiler/stack_sampling_profiler.h"
#include "base/run_loop.h"
#include "base/scoped_native_library.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/bind_test_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "base/time/time.h"
@@ -61,11 +64,13 @@ namespace base {
using SamplingParams = StackSamplingProfiler::SamplingParams;
using Frame = StackSamplingProfiler::Frame;
-using Frames = std::vector<StackSamplingProfiler::Frame>;
+using Frames = std::vector<Frame>;
+using InternalFrame = StackSamplingProfiler::InternalFrame;
+using InternalFrames = std::vector<InternalFrame>;
+using InternalFrameSets = std::vector<std::vector<InternalFrame>>;
using Module = StackSamplingProfiler::Module;
+using InternalModule = StackSamplingProfiler::InternalModule;
using Sample = StackSamplingProfiler::Sample;
-using CallStackProfile = StackSamplingProfiler::CallStackProfile;
-using CallStackProfiles = StackSamplingProfiler::CallStackProfiles;
namespace {
@@ -92,8 +97,9 @@ struct StackConfiguration {
// Signature for a target function that is expected to appear in the stack. See
// SignalAndWaitUntilSignaled() below. The return value should be a program
// counter pointer near the end of the function.
-using TargetFunction = const void*(*)(WaitableEvent*, WaitableEvent*,
- const StackConfiguration*);
+using TargetFunction = const void* (*)(WaitableEvent*,
+ WaitableEvent*,
+ const StackConfiguration*);
// A thread to target for profiling, whose stack is guaranteed to contain
// SignalAndWaitUntilSignaled() when coordinated with the main thread.
@@ -149,7 +155,7 @@ class TargetThread : public PlatformThread::Delegate {
};
// Callback function to be provided when calling through the other library.
- static void OtherLibraryCallback(void *arg);
+ static void OtherLibraryCallback(void* arg);
// Returns the current program counter, or a value very close to it.
static const void* GetProgramCounter();
@@ -237,18 +243,15 @@ NOINLINE const void* TargetThread::CallThroughOtherLibrary(
const StackConfiguration* stack_config) {
if (stack_config) {
// A function whose arguments are a function accepting void*, and a void*.
- using InvokeCallbackFunction = void(*)(void (*)(void*), void*);
+ using InvokeCallbackFunction = void (*)(void (*)(void*), void*);
EXPECT_TRUE(stack_config->library);
InvokeCallbackFunction function = reinterpret_cast<InvokeCallbackFunction>(
GetFunctionPointerFromNativeLibrary(stack_config->library,
"InvokeCallbackFunction"));
EXPECT_TRUE(function);
- TargetFunctionArgs args = {
- thread_started_event,
- finish_event,
- stack_config
- };
+ TargetFunctionArgs args = {thread_started_event, finish_event,
+ stack_config};
(*function)(&OtherLibraryCallback, &args);
}
@@ -258,7 +261,7 @@ NOINLINE const void* TargetThread::CallThroughOtherLibrary(
}
// static
-void TargetThread::OtherLibraryCallback(void *arg) {
+void TargetThread::OtherLibraryCallback(void* arg) {
const TargetFunctionArgs* args = static_cast<TargetFunctionArgs*>(arg);
SignalAndWaitUntilSignaled(args->thread_started_event, args->finish_event,
args->stack_config);
@@ -277,6 +280,92 @@ NOINLINE const void* TargetThread::GetProgramCounter() {
#endif
}
+// Profile consists of the collected sets of internal frames and other
+// sampling information.
+struct Profile {
+ Profile() = default;
+ Profile(Profile&& other) = default;
+ Profile(const InternalFrameSets& frame_sets,
+ int annotation_count,
+ TimeDelta profile_duration,
+ TimeDelta sampling_period);
+
+ ~Profile() = default;
+
+ Profile& operator=(Profile&& other) = default;
+
+ // The collected internal frame sets.
+ InternalFrameSets frame_sets;
+
+ // The number of invocations of RecordAnnotations().
+ int annotation_count;
+
+ // Duration of this profile.
+ TimeDelta profile_duration;
+
+ // Time between samples.
+ TimeDelta sampling_period;
+};
+
+Profile::Profile(const InternalFrameSets& frame_sets,
+ int annotation_count,
+ TimeDelta profile_duration,
+ TimeDelta sampling_period)
+ : frame_sets(frame_sets),
+ annotation_count(annotation_count),
+ profile_duration(profile_duration),
+ sampling_period(sampling_period) {}
+
+// The callback type used to collect a profile. The passed Profile is move-only.
+// Other threads, including the UI thread, may block on callback completion so
+// this should run as quickly as possible.
+using ProfileCompletedCallback = Callback<void(Profile)>;
+
+// TestProfileBuilder collects internal frames produced by the profiler.
+class TestProfileBuilder : public StackSamplingProfiler::ProfileBuilder {
+ public:
+ TestProfileBuilder(const ProfileCompletedCallback& callback);
+
+ ~TestProfileBuilder() override;
+
+ // StackSamplingProfiler::ProfileBuilder:
+ void RecordAnnotations() override;
+ void OnSampleCompleted(InternalFrames internal_frames) override;
+ void OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) override;
+
+ private:
+ // The sets of internal frames recorded.
+ std::vector<InternalFrames> frame_sets_;
+
+ // The number of invocations of RecordAnnotations().
+ int annotation_count_ = 0;
+
+ // Callback made when sampling a profile completes.
+ const ProfileCompletedCallback callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestProfileBuilder);
+};
+
+TestProfileBuilder::TestProfileBuilder(const ProfileCompletedCallback& callback)
+ : callback_(callback) {}
+
+TestProfileBuilder::~TestProfileBuilder() = default;
+
+void TestProfileBuilder::RecordAnnotations() {
+ ++annotation_count_;
+}
+
+void TestProfileBuilder::OnSampleCompleted(InternalFrames internal_frames) {
+ frame_sets_.push_back(std::move(internal_frames));
+}
+
+void TestProfileBuilder::OnProfileCompleted(TimeDelta profile_duration,
+ TimeDelta sampling_period) {
+ callback_.Run(Profile(frame_sets_, annotation_count_, profile_duration,
+ sampling_period));
+}
+
// Loads the other library, which defines a function to be called in the
// WITH_OTHER_LIBRARY configuration.
NativeLibrary LoadOtherLibrary() {
@@ -310,7 +399,7 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
HMODULE module_handle;
// Keep trying to get the module handle until the call fails.
while (::GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
- GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
reinterpret_cast<LPCTSTR>(module_base_address),
&module_handle) ||
::GetLastError() != ERROR_MOD_NOT_FOUND) {
@@ -323,22 +412,6 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
#endif
}
-// Called on the profiler thread when complete, to collect profiles.
-void SaveProfiles(CallStackProfiles* profiles,
- CallStackProfiles pending_profiles) {
- *profiles = std::move(pending_profiles);
-}
-
-// Called on the profiler thread when complete. Collects profiles produced by
-// the profiler, and signals an event to allow the main thread to know that that
-// the profiler is done.
-void SaveProfilesAndSignalEvent(CallStackProfiles* profiles,
- WaitableEvent* event,
- CallStackProfiles pending_profiles) {
- *profiles = std::move(pending_profiles);
- event->Signal();
-}
-
// Executes the function with the target thread running and executing within
// SignalAndWaitUntilSignaled(). Performs all necessary target thread startup
// and shutdown work before and afterward.
@@ -371,14 +444,16 @@ struct TestProfilerInfo {
WaitableEvent::InitialState::NOT_SIGNALED),
profiler(thread_id,
params,
- Bind(&SaveProfilesAndSignalEvent,
- Unretained(&profiles),
- Unretained(&completed)),
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([this](Profile result_profile) {
+ profile = std::move(result_profile);
+ completed.Signal();
+ })),
delegate) {}
// The order here is important to ensure objects being referenced don't get
// destructed until after the objects referencing them.
- CallStackProfiles profiles;
+ Profile profile;
WaitableEvent completed;
StackSamplingProfiler profiler;
@@ -401,21 +476,22 @@ std::vector<std::unique_ptr<TestProfilerInfo>> CreateProfilers(
return profilers;
}
-// Captures profiles as specified by |params| on the TargetThread, and returns
-// them in |profiles|. Waits up to |profiler_wait_time| for the profiler to
-// complete.
-void CaptureProfiles(const SamplingParams& params, TimeDelta profiler_wait_time,
- CallStackProfiles* profiles) {
- WithTargetThread([&params, profiles,
+// Captures internal frames as specified by |params| on the TargetThread, and
+// returns them. Waits up to |profiler_wait_time| for the profiler to complete.
+InternalFrameSets CaptureFrameSets(const SamplingParams& params,
+ TimeDelta profiler_wait_time) {
+ InternalFrameSets frame_sets;
+ WithTargetThread([&params, &frame_sets,
profiler_wait_time](PlatformThreadId target_thread_id) {
TestProfilerInfo info(target_thread_id, params);
info.profiler.Start();
info.completed.TimedWait(profiler_wait_time);
info.profiler.Stop();
info.completed.Wait();
-
- *profiles = std::move(info.profiles);
+ frame_sets = std::move(info.profile.frame_sets);
});
+
+ return frame_sets;
}
// Waits for one of multiple samplings to complete.
@@ -457,38 +533,39 @@ const void* MaybeFixupFunctionAddressForILT(const void* function_address) {
// Searches through |frames|, returning an iterator to the first frame that has
// an instruction pointer within |target_function|. Returns frames.end() if no
// such frames are found.
-Frames::const_iterator FindFirstFrameWithinFunction(
- const Sample& sample,
+InternalFrames::const_iterator FindFirstFrameWithinFunction(
+ const InternalFrames& frames,
TargetFunction target_function) {
- uintptr_t function_start = reinterpret_cast<uintptr_t>(
- MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- target_function)));
+ uintptr_t function_start =
+ reinterpret_cast<uintptr_t>(MaybeFixupFunctionAddressForILT(
+ reinterpret_cast<const void*>(target_function)));
uintptr_t function_end =
reinterpret_cast<uintptr_t>(target_function(nullptr, nullptr, nullptr));
- for (auto it = sample.frames.begin(); it != sample.frames.end(); ++it) {
- if ((it->instruction_pointer >= function_start) &&
- (it->instruction_pointer <= function_end))
+ for (auto it = frames.begin(); it != frames.end(); ++it) {
+ if (it->instruction_pointer >= function_start &&
+ it->instruction_pointer <= function_end) {
return it;
+ }
}
- return sample.frames.end();
+ return frames.end();
}
// Formats a set of frames into a string for test diagnostic output.
-std::string FormatSampleForDiagnosticOutput(
- const Sample& sample,
- const std::vector<Module>& modules) {
+std::string FormatSampleForDiagnosticOutput(const InternalFrames& frames) {
std::string output;
- for (const Frame& frame : sample.frames) {
+ for (const auto& frame : frames) {
output += StringPrintf(
"0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
- modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
+ frame.internal_module.filename.AsUTF8Unsafe().c_str());
}
return output;
}
// Returns a duration that is longer than the test timeout. We would use
// TimeDelta::Max() but https://crbug.com/465948.
-TimeDelta AVeryLongTimeDelta() { return TimeDelta::FromDays(1); }
+TimeDelta AVeryLongTimeDelta() {
+ return TimeDelta::FromDays(1);
+}
// Tests the scenario where the library is unloaded after copying the stack, but
// before walking it. If |wait_until_unloaded| is true, ensures that the
@@ -520,12 +597,11 @@ void TestLibraryUnload(bool wait_until_unloaded) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
NativeLibrary other_library = LoadOtherLibrary();
TargetThread target_thread(StackConfiguration(
- StackConfiguration::WITH_OTHER_LIBRARY,
- other_library));
+ StackConfiguration::WITH_OTHER_LIBRARY, other_library));
PlatformThreadHandle target_thread_handle;
EXPECT_TRUE(PlatformThread::Create(0, &target_thread, &target_thread_handle));
@@ -535,18 +611,22 @@ void TestLibraryUnload(bool wait_until_unloaded) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- std::vector<CallStackProfile> profiles;
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
+ Profile profile;
+
WaitableEvent stack_copied(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEvent start_stack_walk(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
StackCopiedSignaler test_delegate(&stack_copied, &start_stack_walk,
wait_until_unloaded);
- StackSamplingProfiler profiler(target_thread.id(), params, callback,
- &test_delegate);
+ StackSamplingProfiler profiler(
+ target_thread.id(), params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_thread_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })),
+ &test_delegate);
profiler.Start();
@@ -568,58 +648,57 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// on that event.
start_stack_walk.Signal();
- // Wait for the sampling thread to complete and fill out |profiles|.
+ // Wait for the sampling thread to complete and fill out |profile|.
sampling_thread_completed.Wait();
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
if (wait_until_unloaded) {
    // The stack should look like this, resulting in one frame after
- // SignalAndWaitUntilSignaled. The frame in the now-unloaded library is not
- // recorded since we can't get module information.
+ // SignalAndWaitUntilSignaled. The frame in the now-unloaded library is
+ // not recorded since we can't get module information.
//
// ... WaitableEvent and system frames ...
// TargetThread::SignalAndWaitUntilSignaled
// TargetThread::OtherLibraryCallback
- EXPECT_EQ(2, sample.frames.end() - end_frame)
+ EXPECT_EQ(2, frames.end() - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
} else {
// We didn't wait for the asynchronous unloading to complete, so the results
// are non-deterministic: if the library finished unloading we should have
// the same stack as |wait_until_unloaded|, if not we should have the full
// stack. The important thing is that we should not crash.
- if (sample.frames.end() - end_frame == 2) {
+ if (frames.end() - end_frame == 2) {
// This is the same case as |wait_until_unloaded|.
return;
}
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.frames.end())
+ InternalFrames::const_iterator other_library_frame =
+ FindFirstFrameWithinFunction(frames,
+ &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// The stack should look like this, resulting in three frames between
// SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
@@ -631,7 +710,7 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// TargetThread::CallThroughOtherLibrary
EXPECT_EQ(3, other_library_frame - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
}
}
@@ -656,8 +735,9 @@ class StackSamplingProfilerTest : public testing::Test {
} // namespace
-// Checks that the basic expected information is present in a sampled call stack
-// profile.
+// Checks that the basic expected information is present in sampled internal
+// frames.
+//
// macOS ASAN is not yet supported - crbug.com/718628.
#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
#define MAYBE_Basic Basic
@@ -667,67 +747,28 @@ class StackSamplingProfilerTest : public testing::Test {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Basic) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
-
- // Check that the profile and samples sizes are correct, and the module
- // indices are in range.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- EXPECT_EQ(params.sampling_interval, profile.sampling_period);
- const Sample& sample = profile.samples[0];
- EXPECT_EQ(0u, sample.process_milestones);
- for (const auto& frame : sample.frames) {
- ASSERT_GE(frame.module_index, 0u);
- ASSERT_LT(frame.module_index, profile.modules.size());
- }
+ params.samples_per_profile = 1;
+
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+
+  // Check that the number of frame sets is correct.
+ ASSERT_EQ(1u, frame_sets.size());
+ const InternalFrames& frames = frame_sets[0];
+
+ // Check that all the modules are valid.
+ for (const auto& frame : frames)
+ EXPECT_TRUE(frame.internal_module.is_valid);
// Check that the stack contains a frame for
- // TargetThread::SignalAndWaitUntilSignaled() and that the frame has this
- // executable's module.
- Frames::const_iterator loc = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(loc != sample.frames.end())
+ // TargetThread::SignalAndWaitUntilSignaled().
+ InternalFrames::const_iterator loc = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(loc != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
- FilePath executable_path;
- EXPECT_TRUE(PathService::Get(FILE_EXE, &executable_path));
- EXPECT_EQ(executable_path,
- MakeAbsoluteFilePath(profile.modules[loc->module_index].filename));
-}
-
-// Checks that annotations are recorded in samples.
-PROFILER_TEST_F(StackSamplingProfilerTest, Annotations) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- // Check that a run picks up annotations.
- StackSamplingProfiler::SetProcessMilestone(1);
- std::vector<CallStackProfile> profiles1;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles1);
- ASSERT_EQ(1u, profiles1.size());
- const CallStackProfile& profile1 = profiles1[0];
- ASSERT_EQ(1u, profile1.samples.size());
- const Sample& sample1 = profile1.samples[0];
- EXPECT_EQ(1u << 1, sample1.process_milestones);
-
- // Run it a second time but with changed annotations. These annotations
- // should appear in the first acquired sample.
- StackSamplingProfiler::SetProcessMilestone(2);
- std::vector<CallStackProfile> profiles2;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles2);
- ASSERT_EQ(1u, profiles2.size());
- const CallStackProfile& profile2 = profiles2[0];
- ASSERT_EQ(1u, profile2.samples.size());
- const Sample& sample2 = profile2.samples[0];
- EXPECT_EQ(sample1.process_milestones | (1u << 2), sample2.process_milestones);
+ << FormatSampleForDiagnosticOutput(frames);
}
// Checks that the profiler handles stacks containing dynamically-allocated
@@ -741,71 +782,55 @@ PROFILER_TEST_F(StackSamplingProfilerTest, Annotations) {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Alloca) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
+ Profile profile;
WithTargetThread(
- [&params, &profiles](PlatformThreadId target_thread_id) {
+ [&params, &profile](PlatformThreadId target_thread_id) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_thread_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })));
profiler.Start();
sampling_thread_completed.Wait();
},
StackConfiguration(StackConfiguration::WITH_ALLOCA));
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// Check that the stack contains a frame for TargetThread::CallWithAlloca().
- Frames::const_iterator alloca_frame =
- FindFirstFrameWithinFunction(sample, &TargetThread::CallWithAlloca);
- ASSERT_TRUE(alloca_frame != sample.frames.end())
+ InternalFrames::const_iterator alloca_frame =
+ FindFirstFrameWithinFunction(frames, &TargetThread::CallWithAlloca);
+ ASSERT_TRUE(alloca_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(
reinterpret_cast<const void*>(&TargetThread::CallWithAlloca))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// These frames should be adjacent on the stack.
EXPECT_EQ(1, alloca_frame - end_frame)
<< "Stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
-}
-
-// Checks that the expected number of profiles and samples are present in the
-// call stack profiles produced.
-PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilesAndSamples) {
- SamplingParams params;
- params.burst_interval = params.sampling_interval =
- TimeDelta::FromMilliseconds(0);
- params.bursts = 2;
- params.samples_per_burst = 3;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
-
- ASSERT_EQ(2u, profiles.size());
- EXPECT_EQ(3u, profiles[0].samples.size());
- EXPECT_EQ(3u, profiles[1].samples.size());
+ << FormatSampleForDiagnosticOutput(frames);
}
// Checks that a profiler can stop/destruct without ever having started.
@@ -813,15 +838,19 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopWithoutStarting) {
WithTargetThread([](PlatformThreadId target_thread_id) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- CallStackProfiles profiles;
+ Profile profile;
WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile, &sampling_completed](Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_completed.Signal();
+ })));
profiler.Stop(); // Constructed but never started.
EXPECT_FALSE(sampling_completed.IsSignaled());
@@ -860,13 +889,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
// whatever interval the thread wakes up.
params[0].initial_delay = TimeDelta::FromMilliseconds(10);
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 100000;
+ params[0].samples_per_profile = 100000;
params[1].initial_delay = TimeDelta::FromMilliseconds(10);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 100000;
+ params[1].samples_per_profile = 100000;
- SampleRecordedCounter samples_recorded[arraysize(params)];
+ SampleRecordedCounter samples_recorded[size(params)];
TestProfilerInfo profiler_info0(target_thread_id, params[0],
&samples_recorded[0]);
@@ -885,16 +914,16 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
// Ensure that the first sampler can be safely stopped while the second
// continues to run. The stopped first profiler will still have a
- // PerformCollectionTask pending that will do nothing when executed because
- // the collection will have been removed by Stop().
+ // RecordSampleTask pending that will do nothing when executed because the
+ // collection will have been removed by Stop().
profiler_info0.profiler.Stop();
profiler_info0.completed.Wait();
size_t count0 = samples_recorded[0].Get();
size_t count1 = samples_recorded[1].Get();
// Waiting for the second sampler to collect a couple samples ensures that
- // the pending PerformCollectionTask for the first has executed because
- // tasks are always ordered by their next scheduled time.
+ // the pending RecordSampleTask for the first has executed because tasks are
+ // always ordered by their next scheduled time.
while (samples_recorded[1].Get() < count1 + 2)
PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
@@ -903,36 +932,20 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopSafely) {
});
}
-// Checks that no call stack profiles are captured if the profiling is stopped
+// Checks that no internal frames are captured if the profiling is stopped
// during the initial delay.
PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInitialDelay) {
SamplingParams params;
params.initial_delay = TimeDelta::FromSeconds(60);
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, TimeDelta::FromMilliseconds(0), &profiles);
-
- EXPECT_TRUE(profiles.empty());
-}
-
-// Checks that the single completed call stack profile is captured if the
-// profiling is stopped between bursts.
-PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterBurstInterval) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.burst_interval = TimeDelta::FromSeconds(60);
- params.bursts = 2;
- params.samples_per_burst = 1;
-
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, TimeDelta::FromMilliseconds(50), &profiles);
+ InternalFrameSets frame_sets =
+ CaptureFrameSets(params, TimeDelta::FromMilliseconds(0));
- ASSERT_EQ(1u, profiles.size());
- EXPECT_EQ(1u, profiles[0].samples.size());
+ EXPECT_TRUE(frame_sets.empty());
}
-// Checks that tasks can be stopped before completion and incomplete call stack
-// profiles are captured.
+// Checks that tasks can be stopped before completion and that the internal
+// frames collected up to that point are captured.
PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
// Test delegate that counts samples.
class SampleRecordedEvent : public NativeStackSamplerTestDelegate {
@@ -953,7 +966,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
SamplingParams params;
params.sampling_interval = AVeryLongTimeDelta();
- params.samples_per_burst = 2;
+ params.samples_per_profile = 2;
SampleRecordedEvent samples_recorded;
TestProfilerInfo profiler_info(target_thread_id, params, &samples_recorded);
@@ -967,8 +980,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
profiler_info.profiler.Stop();
profiler_info.completed.Wait();
- ASSERT_EQ(1u, profiler_info.profiles.size());
- EXPECT_EQ(1u, profiler_info.profiles[0].samples.size());
+ EXPECT_EQ(1u, profiler_info.profile.frame_sets.size());
});
}
@@ -977,11 +989,15 @@ PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(10);
- CallStackProfiles profiles;
- WithTargetThread([&params, &profiles](PlatformThreadId target_thread_id) {
+ Profile profile;
+ WithTargetThread([&params, &profile](PlatformThreadId target_thread_id) {
std::unique_ptr<StackSamplingProfiler> profiler;
- profiler.reset(new StackSamplingProfiler(
- target_thread_id, params, Bind(&SaveProfiles, Unretained(&profiles))));
+ auto profile_builder = std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([&profile](Profile result_profile) {
+ profile = std::move(result_profile);
+ }));
+ profiler.reset(new StackSamplingProfiler(target_thread_id, params,
+ std::move(profile_builder)));
profiler->Start();
profiler.reset();
@@ -991,49 +1007,17 @@ PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
});
}
-// Checks that the same profiler may be run multiple times.
-PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleTimes) {
- WithTargetThread([](PlatformThreadId target_thread_id) {
- SamplingParams params;
- params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
-
- CallStackProfiles profiles;
- WaitableEvent sampling_completed(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
-
- // Just start and stop to execute code paths.
- profiler.Start();
- profiler.Stop();
- sampling_completed.Wait();
-
- // Ensure a second request will run and not block.
- sampling_completed.Reset();
- profiles.clear();
- profiler.Start();
- sampling_completed.Wait();
- profiler.Stop();
- ASSERT_EQ(1u, profiles.size());
- });
-}
-
// Checks that the different profilers may be run.
PROFILER_TEST_F(StackSamplingProfilerTest, CanRunMultipleProfilers) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
- profiles.clear();
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
}
// Checks that a sampler can be started while another is running.
@@ -1042,10 +1026,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
std::vector<SamplingParams> params(2);
params[0].initial_delay = AVeryLongTimeDelta();
- params[0].samples_per_burst = 1;
+ params[0].samples_per_profile = 1;
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 1;
+ params[1].samples_per_profile = 1;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1053,7 +1037,35 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
profiler_infos[0]->profiler.Start();
profiler_infos[1]->profiler.Start();
profiler_infos[1]->completed.Wait();
- EXPECT_EQ(1u, profiler_infos[1]->profiles.size());
+ EXPECT_EQ(1u, profiler_infos[1]->profile.frame_sets.size());
+ });
+}
+
+// Checks that the profile duration and the sampling interval are calculated
+// correctly. Also checks that RecordAnnotations() is invoked each time a sample
+// is recorded.
+PROFILER_TEST_F(StackSamplingProfilerTest, ProfileGeneralInfo) {
+ WithTargetThread([](PlatformThreadId target_thread_id) {
+ SamplingParams params;
+ params.sampling_interval = TimeDelta::FromMilliseconds(1);
+ params.samples_per_profile = 3;
+
+ TestProfilerInfo profiler_info(target_thread_id, params);
+
+ profiler_info.profiler.Start();
+ profiler_info.completed.Wait();
+ EXPECT_EQ(3u, profiler_info.profile.frame_sets.size());
+
+ // The profile duration should be greater than the total sampling intervals.
+ EXPECT_GT(profiler_info.profile.profile_duration,
+ profiler_info.profile.sampling_period * 3);
+
+ EXPECT_EQ(TimeDelta::FromMilliseconds(1),
+ profiler_info.profile.sampling_period);
+
+ // The number of invocations of RecordAnnotations() should be equal to the
+ // number of samples recorded.
+ EXPECT_EQ(3, profiler_info.profile.annotation_count);
});
}
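
Note on the assertions in the new ProfileGeneralInfo test: with sampling_interval = 1 ms and samples_per_profile = 3, the summed nominal sampling periods come to 3 ms, while profile_duration is measured in wall-clock time and also covers the time spent collecting each sample, hence the strict EXPECT_GT rather than an equality check. (This reading is inferred from the assertions themselves; the exact definition of profile_duration lives in code outside this excerpt.)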
@@ -1061,11 +1073,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleStart) {
PROFILER_TEST_F(StackSamplingProfilerTest, SamplerIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
// Capture thread should still be running at this point.
ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
@@ -1079,7 +1090,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, SamplerIdleShutdown) {
// happens asynchronously. Watch until the thread actually exits. This test
// will time-out in the case of failure.
while (StackSamplingProfiler::TestAPI::IsSamplingThreadRunning())
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
}
// Checks that additional requests will restart a stopped profiler.
@@ -1087,11 +1098,10 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
WillRestartSamplerAfterIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ InternalFrameSets frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
// Capture thread should still be running at this point.
ASSERT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
@@ -1101,9 +1111,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(false);
// Ensure another capture will start the sampling thread and run.
- profiles.clear();
- CaptureProfiles(params, AVeryLongTimeDelta(), &profiles);
- ASSERT_EQ(1u, profiles.size());
+ frame_sets = CaptureFrameSets(params, AVeryLongTimeDelta());
+ ASSERT_EQ(1u, frame_sets.size());
EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
}
@@ -1114,7 +1123,7 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopAfterIdleShutdown) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(1);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
TestProfilerInfo profiler_info(target_thread_id, params);
@@ -1142,11 +1151,11 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
params[0].initial_delay = AVeryLongTimeDelta();
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 1;
+ params[0].samples_per_profile = 1;
params[1].initial_delay = TimeDelta::FromMilliseconds(0);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 1;
+ params[1].samples_per_profile = 1;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1171,13 +1180,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, IdleShutdownAbort) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(1);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
TestProfilerInfo profiler_info(target_thread_id, params);
profiler_info.profiler.Start();
profiler_info.completed.Wait();
- EXPECT_EQ(1u, profiler_info.profiles.size());
+ EXPECT_EQ(1u, profiler_info.profile.frame_sets.size());
// Perform an idle shutdown but simulate that a new capture is started
// before it can actually run.
@@ -1188,14 +1197,14 @@ PROFILER_TEST_F(StackSamplingProfilerTest, IdleShutdownAbort) {
// except to wait a reasonable amount of time and then check. Since the
// thread was just running ("perform" blocked until it was), it should
// finish almost immediately and without any waiting for tasks or events.
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(200));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(200));
EXPECT_TRUE(StackSamplingProfiler::TestAPI::IsSamplingThreadRunning());
// Ensure that it's still possible to run another sampler.
TestProfilerInfo another_info(target_thread_id, params);
another_info.profiler.Start();
another_info.completed.Wait();
- EXPECT_EQ(1u, another_info.profiles.size());
+ EXPECT_EQ(1u, another_info.profile.frame_sets.size());
});
}
@@ -1211,11 +1220,11 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_InSync) {
// will be 10ms (delay) + 10x1ms (sampling) + 1/2 timer minimum interval.
params[0].initial_delay = TimeDelta::FromMilliseconds(10);
params[0].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[0].samples_per_burst = 9;
+ params[0].samples_per_profile = 9;
params[1].initial_delay = TimeDelta::FromMilliseconds(11);
params[1].sampling_interval = TimeDelta::FromMilliseconds(1);
- params[1].samples_per_burst = 8;
+ params[1].samples_per_profile = 8;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1225,16 +1234,14 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_InSync) {
// Wait for one profiler to finish.
size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
- ASSERT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
size_t other_profiler = 1 - completed_profiler;
// Wait for the other profiler to finish.
profiler_infos[other_profiler]->completed.Wait();
- ASSERT_EQ(1u, profiler_infos[other_profiler]->profiles.size());
- // Ensure each got the correct number of samples.
- EXPECT_EQ(9u, profiler_infos[0]->profiles[0].samples.size());
- EXPECT_EQ(8u, profiler_infos[1]->profiles[0].samples.size());
+ // Ensure each got the correct number of frame sets.
+ EXPECT_EQ(9u, profiler_infos[0]->profile.frame_sets.size());
+ EXPECT_EQ(8u, profiler_infos[1]->profile.frame_sets.size());
});
}
@@ -1245,15 +1252,15 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
params[0].initial_delay = TimeDelta::FromMilliseconds(8);
params[0].sampling_interval = TimeDelta::FromMilliseconds(4);
- params[0].samples_per_burst = 10;
+ params[0].samples_per_profile = 10;
params[1].initial_delay = TimeDelta::FromMilliseconds(9);
params[1].sampling_interval = TimeDelta::FromMilliseconds(3);
- params[1].samples_per_burst = 10;
+ params[1].samples_per_profile = 10;
params[2].initial_delay = TimeDelta::FromMilliseconds(10);
params[2].sampling_interval = TimeDelta::FromMilliseconds(2);
- params[2].samples_per_burst = 10;
+ params[2].samples_per_profile = 10;
std::vector<std::unique_ptr<TestProfilerInfo>> profiler_infos =
CreateProfilers(target_thread_id, params);
@@ -1263,7 +1270,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
// Wait for one profiler to finish.
size_t completed_profiler = WaitForSamplingComplete(profiler_infos);
- EXPECT_EQ(1u, profiler_infos[completed_profiler]->profiles.size());
+ EXPECT_EQ(10u,
+ profiler_infos[completed_profiler]->profile.frame_sets.size());
// Stop and destroy all profilers, always in the same order. Don't crash.
for (size_t i = 0; i < profiler_infos.size(); ++i)
profiler_infos[i]->profiler.Stop();
@@ -1283,20 +1291,24 @@ PROFILER_TEST_F(StackSamplingProfilerTest, ConcurrentProfiling_Mixed) {
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
- params.samples_per_burst = 1;
+ params.samples_per_profile = 1;
- std::vector<CallStackProfile> profiles;
+ Profile profile;
{
ScopedNativeLibrary other_library(LoadOtherLibrary());
WithTargetThread(
- [&params, &profiles](PlatformThreadId target_thread_id) {
+ [&params, &profile](PlatformThreadId target_thread_id) {
WaitableEvent sampling_thread_completed(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles),
- Unretained(&sampling_thread_completed));
- StackSamplingProfiler profiler(target_thread_id, params, callback);
+ StackSamplingProfiler profiler(
+ target_thread_id, params,
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([&profile, &sampling_thread_completed](
+ Profile result_profile) {
+ profile = std::move(result_profile);
+ sampling_thread_completed.Signal();
+ })));
profiler.Start();
sampling_thread_completed.Wait();
},
@@ -1304,33 +1316,32 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
other_library.get()));
}
- // Look up the sample.
- ASSERT_EQ(1u, profiles.size());
- const CallStackProfile& profile = profiles[0];
- ASSERT_EQ(1u, profile.samples.size());
- const Sample& sample = profile.samples[0];
+ // Look up the frames.
+ ASSERT_EQ(1u, profile.frame_sets.size());
+ const InternalFrames& frames = profile.frame_sets[0];
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.frames.end())
+ InternalFrames::const_iterator other_library_frame =
+ FindFirstFrameWithinFunction(frames,
+ &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample, &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.frames.end())
+ InternalFrames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ frames, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
&TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
- << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << FormatSampleForDiagnosticOutput(frames);
// The stack should look like this, resulting in three frames between
// SignalAndWaitUntilSignaled and CallThroughOtherLibrary:
@@ -1341,7 +1352,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
// InvokeCallbackFunction (in other library)
// TargetThread::CallThroughOtherLibrary
EXPECT_EQ(3, other_library_frame - end_frame)
- << "Stack:\n" << FormatSampleForDiagnosticOutput(sample, profile.modules);
+ << "Stack:\n"
+ << FormatSampleForDiagnosticOutput(frames);
}
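
Aside on the final assertion above: other_library_frame and end_frame are iterators into the same InternalFrames container, so their difference is the number of stack positions separating the two functions, and the expected value of 3 matches the stack listed in the comment. A standalone illustration of the iterator arithmetic (placeholder data, not Chromium code):

    #include <cassert>
    #include <vector>

    int main() {
      // Indices stand in for stack frames, innermost first.
      std::vector<int> frames = {0, 1, 2, 3, 4, 5};
      auto end_frame = frames.begin() + 1;            // e.g. SignalAndWaitUntilSignaled
      auto other_library_frame = frames.begin() + 4;  // e.g. CallThroughOtherLibrary
      assert(other_library_frame - end_frame == 3);   // three frames further up the stack
      return 0;
    }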
// Checks that a stack that runs through a library that is unloading produces a
@@ -1389,38 +1401,42 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleSampledThreads) {
SamplingParams params1, params2;
params1.initial_delay = TimeDelta::FromMilliseconds(10);
params1.sampling_interval = TimeDelta::FromMilliseconds(1);
- params1.samples_per_burst = 9;
+ params1.samples_per_profile = 9;
params2.initial_delay = TimeDelta::FromMilliseconds(10);
params2.sampling_interval = TimeDelta::FromMilliseconds(1);
- params2.samples_per_burst = 8;
+ params2.samples_per_profile = 8;
- std::vector<CallStackProfile> profiles1, profiles2;
+ Profile profile1, profile2;
WaitableEvent sampling_thread_completed1(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback1 =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles1),
- Unretained(&sampling_thread_completed1));
- StackSamplingProfiler profiler1(target_thread1.id(), params1, callback1);
+ StackSamplingProfiler profiler1(
+ target_thread1.id(), params1,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile1, &sampling_thread_completed1](Profile result_profile) {
+ profile1 = std::move(result_profile);
+ sampling_thread_completed1.Signal();
+ })));
WaitableEvent sampling_thread_completed2(
WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- const StackSamplingProfiler::CompletedCallback callback2 =
- Bind(&SaveProfilesAndSignalEvent, Unretained(&profiles2),
- Unretained(&sampling_thread_completed2));
- StackSamplingProfiler profiler2(target_thread2.id(), params2, callback2);
+ StackSamplingProfiler profiler2(
+ target_thread2.id(), params2,
+ std::make_unique<TestProfileBuilder>(BindLambdaForTesting(
+ [&profile2, &sampling_thread_completed2](Profile result_profile) {
+ profile2 = std::move(result_profile);
+ sampling_thread_completed2.Signal();
+ })));
// Finally the real work.
profiler1.Start();
profiler2.Start();
sampling_thread_completed1.Wait();
sampling_thread_completed2.Wait();
- ASSERT_EQ(1u, profiles1.size());
- EXPECT_EQ(9u, profiles1[0].samples.size());
- ASSERT_EQ(1u, profiles2.size());
- EXPECT_EQ(8u, profiles2[0].samples.size());
+ EXPECT_EQ(9u, profile1.frame_sets.size());
+ EXPECT_EQ(8u, profile2.frame_sets.size());
target_thread1.SignalThreadToFinish();
target_thread2.SignalThreadToFinish();
@@ -1441,9 +1457,11 @@ class ProfilerThread : public SimpleThread {
WaitableEvent::InitialState::NOT_SIGNALED),
profiler_(thread_id,
params,
- Bind(&SaveProfilesAndSignalEvent,
- Unretained(&profiles_),
- Unretained(&completed_))) {}
+ std::make_unique<TestProfileBuilder>(
+ BindLambdaForTesting([this](Profile result_profile) {
+ profile_ = std::move(result_profile);
+ completed_.Signal();
+ }))) {}
void Run() override {
run_.Wait();
@@ -1454,12 +1472,12 @@ class ProfilerThread : public SimpleThread {
void Wait() { completed_.Wait(); }
- CallStackProfiles& profiles() { return profiles_; }
+ Profile& profile() { return profile_; }
private:
WaitableEvent run_;
- CallStackProfiles profiles_;
+ Profile profile_;
WaitableEvent completed_;
StackSamplingProfiler profiler_;
};
@@ -1474,17 +1492,17 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilerThreads) {
SamplingParams params1, params2;
params1.initial_delay = TimeDelta::FromMilliseconds(10);
params1.sampling_interval = TimeDelta::FromMilliseconds(1);
- params1.samples_per_burst = 9;
+ params1.samples_per_profile = 9;
params2.initial_delay = TimeDelta::FromMilliseconds(10);
params2.sampling_interval = TimeDelta::FromMilliseconds(1);
- params2.samples_per_burst = 8;
+ params2.samples_per_profile = 8;
// Start the profiler threads and give them a moment to get going.
ProfilerThread profiler_thread1("profiler1", target_thread_id, params1);
ProfilerThread profiler_thread2("profiler2", target_thread_id, params2);
profiler_thread1.Start();
profiler_thread2.Start();
- PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
// This will (approximately) synchronize the two threads.
profiler_thread1.Go();
@@ -1493,10 +1511,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleProfilerThreads) {
// Wait for them both to finish and validate collection.
profiler_thread1.Wait();
profiler_thread2.Wait();
- ASSERT_EQ(1u, profiler_thread1.profiles().size());
- EXPECT_EQ(9u, profiler_thread1.profiles()[0].samples.size());
- ASSERT_EQ(1u, profiler_thread2.profiles().size());
- EXPECT_EQ(8u, profiler_thread2.profiles()[0].samples.size());
+ EXPECT_EQ(9u, profiler_thread1.profile().frame_sets.size());
+ EXPECT_EQ(8u, profiler_thread2.profile().frame_sets.size());
profiler_thread1.Join();
profiler_thread2.Join();
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder.cc b/chromium/base/profiler/win32_stack_frame_unwinder.cc
index 9e6ab392524..a3f5f74b853 100644
--- a/chromium/base/profiler/win32_stack_frame_unwinder.cc
+++ b/chromium/base/profiler/win32_stack_frame_unwinder.cc
@@ -67,7 +67,7 @@ PRUNTIME_FUNCTION Win32UnwindFunctions::LookupFunctionEntry(
DWORD64 program_counter,
PDWORD64 image_base) {
#ifdef _WIN64
- return RtlLookupFunctionEntry(program_counter, image_base, nullptr);
+ return ::RtlLookupFunctionEntry(program_counter, image_base, nullptr);
#else
NOTREACHED();
return nullptr;
@@ -82,9 +82,9 @@ void Win32UnwindFunctions::VirtualUnwind(DWORD64 image_base,
void* handler_data;
ULONG64 establisher_frame;
KNONVOLATILE_CONTEXT_POINTERS nvcontext = {};
- RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, program_counter,
- runtime_function, context, &handler_data,
- &establisher_frame, &nvcontext);
+ ::RtlVirtualUnwind(UNW_FLAG_NHANDLER, image_base, program_counter,
+ runtime_function, context, &handler_data,
+ &establisher_frame, &nvcontext);
#else
NOTREACHED();
#endif
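
The only change to win32_stack_frame_unwinder.cc is prefixing the RtlLookupFunctionEntry and RtlVirtualUnwind calls with "::". The likely intent (not stated in the patch) is to force qualified lookup of the Win32 APIs at global scope, so that no similarly named declaration in an enclosing scope can be picked up instead. A standalone illustration of that C++ lookup rule, using placeholder names rather than the real Chromium or Win32 symbols:

    #include <iostream>

    // Free function standing in for a Win32 API such as RtlVirtualUnwind.
    void VirtualUnwind() { std::cout << "global VirtualUnwind\n"; }

    class UnwindFunctions {
     public:
      // Member sharing the API's name, e.g. a hook that tests could override.
      void VirtualUnwind() { std::cout << "member VirtualUnwind\n"; }

      void CallThrough() {
        VirtualUnwind();    // unqualified: resolves to the member above
        ::VirtualUnwind();  // qualified: always the global (API) function
      }
    };

    int main() {
      UnwindFunctions().CallThrough();
      return 0;
    }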