Commit cc35222c authored by alph's avatar alph Committed by Commit bot

V8 Sampling Profiler: Collect V8 sample trace events on Linux and MacOS

When the v8.cpu_profiler tracing category is enabled,
the profiler starts generating V8Sample trace events carrying
stack frames and the current V8 VM state.

LockFreeCircularQueue template is taken from V8.

Android and Windows support will be coming.

BUG=406277

Review URL: https://codereview.chromium.org/1017063002

Cr-Commit-Position: refs/heads/master@{#321565}
parent f6dd3c33
...@@ -129,6 +129,7 @@ ...@@ -129,6 +129,7 @@
'renderer/devtools/devtools_agent_filter.h', 'renderer/devtools/devtools_agent_filter.h',
'renderer/devtools/devtools_client.cc', 'renderer/devtools/devtools_client.cc',
'renderer/devtools/devtools_client.h', 'renderer/devtools/devtools_client.h',
'renderer/devtools/lock_free_circular_queue.h',
'renderer/devtools/v8_sampling_profiler.cc', 'renderer/devtools/v8_sampling_profiler.cc',
'renderer/devtools/v8_sampling_profiler.h', 'renderer/devtools/v8_sampling_profiler.h',
'renderer/disambiguation_popup_helper.cc', 'renderer/disambiguation_popup_helper.cc',
......
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_
#define CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_
#include "base/atomicops.h"
#include "base/memory/aligned_memory.h"
#define CACHELINE_ALIGNED ALIGNAS(64)
namespace content {
MSVC_PUSH_DISABLE_WARNING(4324) // structure was padded due to align
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
// StartEnqueue will return nullptr. The queue is designed with
// a goal in mind to evade cache lines thrashing by preventing
// simultaneous reads and writes to adjacent memory locations.
template <typename T, unsigned Length>
class LockFreeCircularQueue {
 public:
  // Executed on the application thread.
  LockFreeCircularQueue();
  ~LockFreeCircularQueue();

  // StartEnqueue returns a pointer to a memory location for storing the next
  // record or nullptr if all entries are full at the moment.
  T* StartEnqueue();
  // Notifies the queue that the producer has completed writing data into the
  // memory returned by StartEnqueue and it can be passed to the consumer.
  void FinishEnqueue();

  // Executed on the consumer (analyzer) thread.
  // Retrieves, but does not remove, the head of this queue, returning nullptr
  // if this queue is empty. After the record has been read by a consumer,
  // Remove must be called.
  T* Peek();
  void Remove();

  // The class fields have stricter alignment requirements than a normal new
  // can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

 private:
  // Reserved values for the entry marker.
  enum {
    kEmpty,  // Marks clean (processed) entries.
    kFull    // Marks entries already filled by the producer but not yet
             // completely processed by the consumer.
  };

  // Each entry occupies its own cache line, so the producer (touching the
  // tail entry) and the consumer (touching the head entry) never write to
  // the same line.
  struct CACHELINE_ALIGNED Entry {
    Entry() : marker(kEmpty) {}
    T record;
    // kEmpty or kFull; ownership of the entry is transferred between the
    // two threads by acquire/release operations on this marker.
    base::subtle::Atomic32 marker;
  };

  // Returns the entry following |entry|, wrapping around past the end of
  // |buffer_|.
  Entry* Next(Entry* entry);

  Entry buffer_[Length];
  // The two cursors are placed on separate cache lines to avoid false
  // sharing between the producer and consumer threads.
  CACHELINE_ALIGNED Entry* enqueue_pos_;
  CACHELINE_ALIGNED Entry* dequeue_pos_;

  DISALLOW_COPY_AND_ASSIGN(LockFreeCircularQueue);
};
MSVC_POP_WARNING()
// Both cursors start at the first slot; Entry's constructor marks every
// slot kEmpty, so a freshly constructed queue is empty.
template <typename T, unsigned L>
LockFreeCircularQueue<T, L>::LockFreeCircularQueue()
    : enqueue_pos_(buffer_), dequeue_pos_(buffer_) {
}
// Nothing to tear down explicitly: all entries are stored inline in buffer_.
template <typename T, unsigned L>
LockFreeCircularQueue<T, L>::~LockFreeCircularQueue() {
}
template <typename T, unsigned L>
T* LockFreeCircularQueue<T, L>::Peek() {
  // Full barrier ensures the consumer sees the most recent producer writes
  // before inspecting the marker.
  base::subtle::MemoryBarrier();
  // Acquire-load pairs with the Release_Store in FinishEnqueue: once the
  // marker reads kFull, the record contents are guaranteed visible.
  if (base::subtle::Acquire_Load(&dequeue_pos_->marker) == kFull) {
    return &dequeue_pos_->record;
  }
  return nullptr;
}
template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::Remove() {
  // Release-store hands the slot back to the producer; pairs with the
  // Acquire_Load in StartEnqueue.
  base::subtle::Release_Store(&dequeue_pos_->marker, kEmpty);
  dequeue_pos_ = Next(dequeue_pos_);
}
template <typename T, unsigned L>
T* LockFreeCircularQueue<T, L>::StartEnqueue() {
  // Full barrier ensures the producer sees the most recent consumer writes
  // before inspecting the marker.
  base::subtle::MemoryBarrier();
  // Acquire-load pairs with the Release_Store in Remove: a kEmpty marker
  // means the consumer has fully finished with this slot.
  if (base::subtle::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
    return &enqueue_pos_->record;
  }
  return nullptr;
}
template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::FinishEnqueue() {
  // Release-store publishes the completed record to the consumer; pairs
  // with the Acquire_Load in Peek.
  base::subtle::Release_Store(&enqueue_pos_->marker, kFull);
  enqueue_pos_ = Next(enqueue_pos_);
}
// Advances |entry| by one slot, wrapping back to the first slot once the
// end of the buffer is reached.
template <typename T, unsigned L>
typename LockFreeCircularQueue<T, L>::Entry* LockFreeCircularQueue<T, L>::Next(
    Entry* entry) {
  Entry* successor = entry + 1;
  return (successor == buffer_ + L) ? buffer_ : successor;
}
template <typename T, unsigned L>
void* LockFreeCircularQueue<T, L>::operator new(size_t size) {
  // Entries and cursors are CACHELINE_ALIGNED, which exceeds what a plain
  // global operator new guarantees, so allocate with explicit alignment.
  typedef LockFreeCircularQueue<T, L> QueueTypeAlias;
  return base::AlignedAlloc(size, ALIGNOF(QueueTypeAlias));
}
template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::operator delete(void* ptr) {
  // Memory obtained via base::AlignedAlloc must be released with
  // base::AlignedFree.
  base::AlignedFree(ptr);
}
} // namespace content
#endif // CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_
...@@ -4,15 +4,22 @@ ...@@ -4,15 +4,22 @@
#include "content/renderer/devtools/v8_sampling_profiler.h" #include "content/renderer/devtools/v8_sampling_profiler.h"
#if defined(OS_POSIX)
#include <signal.h>
#define USE_SIGNALS
#endif
#include "base/format_macros.h" #include "base/format_macros.h"
#include "base/strings/string_util.h" #include "base/strings/stringprintf.h"
#include "base/synchronization/cancellation_flag.h" #include "base/synchronization/cancellation_flag.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h" #include "base/trace_event/trace_event_argument.h"
#include "content/renderer/devtools/lock_free_circular_queue.h"
#include "content/renderer/render_thread_impl.h" #include "content/renderer/render_thread_impl.h"
#include "v8/include/v8.h" #include "v8/include/v8.h"
using base::trace_event::ConvertableToTraceFormat;
using base::trace_event::TraceLog; using base::trace_event::TraceLog;
using base::trace_event::TracedValue; using base::trace_event::TracedValue;
using v8::Isolate; using v8::Isolate;
...@@ -22,10 +29,75 @@ namespace content { ...@@ -22,10 +29,75 @@ namespace content {
namespace { namespace {
std::string PtrToString(const void* value) { std::string PtrToString(const void* value) {
char buffer[20]; return base::StringPrintf(
base::snprintf(buffer, sizeof(buffer), "0x%" PRIx64, "0x%" PRIx64, static_cast<uint64>(reinterpret_cast<intptr_t>(value)));
static_cast<uint64>(reinterpret_cast<intptr_t>(value))); }
return buffer;
// One stack sample captured in the profiled thread's signal handler.
// Plain inline storage only, so it can live inside LockFreeCircularQueue.
class SampleRecord {
 public:
  static const int kMaxFramesCountLog2 = 8;
  // Maximum number of frames storable in the frames_count_ bit-field.
  static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;

  SampleRecord() {}
  // Time the sample was taken (set by Collect).
  base::TimeTicks timestamp() const { return timestamp_; }
  // Fills the record from the interrupted thread's register state.
  // Runs inside the signal handler (see Sampler::DoSample), so it must not
  // allocate.
  void Collect(v8::Isolate* isolate,
               base::TimeTicks timestamp,
               const v8::RegisterState& state);
  // Converts the sample into a trace-event argument; runs on the sampling
  // thread where allocation is allowed.
  scoped_refptr<ConvertableToTraceFormat> ToTraceFormat() const;

 private:
  base::TimeTicks timestamp_;
  unsigned vm_state_ : 4;  // v8::StateTag at sampling time.
  unsigned frames_count_ : kMaxFramesCountLog2;  // Valid entries in frames_.
  const void* frames_[kMaxFramesCount];  // Raw code addresses.

  DISALLOW_COPY_AND_ASSIGN(SampleRecord);
};
// Captures the V8 stack and VM state for the interrupted thread.
// Runs inside the SIGPROF handler, so no allocation or locking may happen
// on this path.
void SampleRecord::Collect(v8::Isolate* isolate,
                           base::TimeTicks timestamp,
                           const v8::RegisterState& state) {
  v8::SampleInfo sample_info;
  // GetStackSample wants a mutable void** output buffer; use an explicit
  // const_cast instead of a C-style cast that silently dropped the const.
  isolate->GetStackSample(state, const_cast<void**>(frames_), kMaxFramesCount,
                          &sample_info);
  timestamp_ = timestamp;
  frames_count_ = sample_info.frames_count;
  vm_state_ = sample_info.vm_state;
}
// Builds the trace-event payload: {"vm_state": <name>, "stack": [addr...]}.
// Runs on the sampling thread (InjectPendingEvents), outside the signal
// handler, so allocation is fine here.
scoped_refptr<ConvertableToTraceFormat> SampleRecord::ToTraceFormat() const {
  scoped_refptr<TracedValue> data(new TracedValue());
  const char* vm_state = nullptr;
  switch (vm_state_) {
    case v8::StateTag::JS:
      vm_state = "js";
      break;
    case v8::StateTag::GC:
      vm_state = "gc";
      break;
    case v8::StateTag::COMPILER:
      vm_state = "compiler";
      break;
    case v8::StateTag::OTHER:
      vm_state = "other";
      break;
    case v8::StateTag::EXTERNAL:
      vm_state = "external";
      break;
    case v8::StateTag::IDLE:
      vm_state = "idle";
      break;
    default:
      NOTREACHED();
  }
  data->SetString("vm_state", vm_state);
  data->BeginArray("stack");
  for (unsigned i = 0; i < frames_count_; ++i) {
    // Frames are emitted as hex address strings; presumably matched against
    // JitCodeAdded events by the trace consumer — TODO confirm.
    data->AppendString(PtrToString(frames_[i]));
  }
  data->EndArray();
  return data;
} }
} // namespace } // namespace
...@@ -33,30 +105,84 @@ std::string PtrToString(const void* value) { ...@@ -33,30 +105,84 @@ std::string PtrToString(const void* value) {
// The class implements a sampler responsible for sampling a single thread. // The class implements a sampler responsible for sampling a single thread.
class Sampler { class Sampler {
public: public:
explicit Sampler(Isolate* isolate) : isolate_(isolate) { DCHECK(isolate_); } ~Sampler();
static scoped_ptr<Sampler> CreateForCurrentThread(); static scoped_ptr<Sampler> CreateForCurrentThread();
static Sampler* GetInstance() { return tls_instance_.Pointer()->Get(); }
// These methods are called from the sampling thread. // These methods are called from the sampling thread.
void Start(); void Start();
void Stop(); void Stop();
void Sample(); void Sample();
void DoSample(const v8::RegisterState& state);
bool EventsCollectedForTest() const {
return base::subtle::NoBarrier_Load(&code_added_events_count_) != 0 ||
base::subtle::NoBarrier_Load(&samples_count_) != 0;
}
private: private:
Sampler();
static void InstallJitCodeEventHandler(Isolate* isolate, void* data); static void InstallJitCodeEventHandler(Isolate* isolate, void* data);
static void HandleJitCodeEvent(const v8::JitCodeEvent* event); static void HandleJitCodeEvent(const v8::JitCodeEvent* event);
static scoped_refptr<base::trace_event::ConvertableToTraceFormat> static scoped_refptr<ConvertableToTraceFormat> JitCodeEventToTraceFormat(
JitCodeEventToTraceFormat(const v8::JitCodeEvent* event); const v8::JitCodeEvent* event);
static base::PlatformThreadHandle GetCurrentThreadHandle();
void InjectPendingEvents();
static const unsigned kNumberOfSamples = 10;
typedef LockFreeCircularQueue<SampleRecord, kNumberOfSamples> SamplingQueue;
base::PlatformThreadId thread_id_;
base::PlatformThreadHandle thread_handle_;
Isolate* isolate_; Isolate* isolate_;
scoped_ptr<SamplingQueue> samples_data_;
base::subtle::Atomic32 code_added_events_count_;
base::subtle::Atomic32 samples_count_;
static base::LazyInstance<base::ThreadLocalPointer<Sampler>>::Leaky
tls_instance_;
}; };
base::LazyInstance<base::ThreadLocalPointer<Sampler>>::Leaky
Sampler::tls_instance_ = LAZY_INSTANCE_INITIALIZER;
// Must run on the thread being profiled: captures that thread's id, handle
// and current isolate, and registers itself in TLS so HandleProfilerSignal
// and HandleJitCodeEvent can reach it via GetInstance().
Sampler::Sampler()
    : thread_id_(base::PlatformThread::CurrentId()),
      thread_handle_(Sampler::GetCurrentThreadHandle()),
      isolate_(Isolate::GetCurrent()),
      code_added_events_count_(0),
      samples_count_(0) {
  DCHECK(isolate_);
  // Only one Sampler per thread is supported.
  DCHECK(!GetInstance());
  tls_instance_.Pointer()->Set(this);
}
Sampler::~Sampler() {
  // Unregister from TLS; HandleJitCodeEvent treats a null instance as
  // "sampler already destroyed" and drops the event.
  DCHECK(GetInstance());
  tls_instance_.Pointer()->Set(nullptr);
}
// static // static
scoped_ptr<Sampler> Sampler::CreateForCurrentThread() { scoped_ptr<Sampler> Sampler::CreateForCurrentThread() {
return scoped_ptr<Sampler>(new Sampler(Isolate::GetCurrent())); return scoped_ptr<Sampler>(new Sampler());
}
// static
// Handle of the calling thread; used by Sample() as the pthread_kill target
// on POSIX.
base::PlatformThreadHandle Sampler::GetCurrentThreadHandle() {
#ifdef OS_WIN
  // TODO(alph): Add Windows support.
  return base::PlatformThreadHandle();
#else
  return base::PlatformThread::CurrentHandle();
#endif
} }
void Sampler::Start() { void Sampler::Start() {
samples_data_.reset(new SamplingQueue());
v8::JitCodeEventHandler handler = &HandleJitCodeEvent; v8::JitCodeEventHandler handler = &HandleJitCodeEvent;
isolate_->RequestInterrupt(&InstallJitCodeEventHandler, isolate_->RequestInterrupt(&InstallJitCodeEventHandler,
reinterpret_cast<void*>(handler)); reinterpret_cast<void*>(handler));
...@@ -64,9 +190,42 @@ void Sampler::Start() { ...@@ -64,9 +190,42 @@ void Sampler::Start() {
void Sampler::Stop() { void Sampler::Stop() {
isolate_->RequestInterrupt(&InstallJitCodeEventHandler, nullptr); isolate_->RequestInterrupt(&InstallJitCodeEventHandler, nullptr);
samples_data_.reset();
} }
void Sampler::Sample() { void Sampler::Sample() {
#if defined(USE_SIGNALS)
int error = pthread_kill(thread_handle_.platform_handle(), SIGPROF);
if (error) {
LOG(ERROR) << "pthread_kill failed with error " << error << " "
<< strerror(error);
}
InjectPendingEvents();
#endif
}
// Captures one stack sample into the lock-free queue.
void Sampler::DoSample(const v8::RegisterState& state) {
  // Called in the sampled thread signal handler.
  // Because of that it is not allowed to do any memory allocation here.
  base::TimeTicks timestamp = base::TimeTicks::NowFromSystemTraceTime();
  SampleRecord* record = samples_data_->StartEnqueue();
  // Queue full: silently drop this sample.
  if (!record)
    return;
  record->Collect(isolate_, timestamp, state);
  samples_data_->FinishEnqueue();
  // Counter is read by EventsCollectedForTest from the sampling thread.
  base::subtle::NoBarrier_AtomicIncrement(&samples_count_, 1);
}
// Drains the sample queue on the sampling thread, emitting one V8Sample
// trace event per record, stamped with the time the sample was taken.
void Sampler::InjectPendingEvents() {
  SampleRecord* record = samples_data_->Peek();
  while (record) {
    TRACE_EVENT_SAMPLE_WITH_TID_AND_TIMESTAMP1(
        TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"), "V8Sample", thread_id_,
        (record->timestamp() - base::TimeTicks()).InMicroseconds(), "data",
        record->ToTraceFormat());
    samples_data_->Remove();
    record = samples_data_->Peek();
  }
} }
// static // static
...@@ -83,11 +242,18 @@ void Sampler::InstallJitCodeEventHandler(Isolate* isolate, void* data) { ...@@ -83,11 +242,18 @@ void Sampler::InstallJitCodeEventHandler(Isolate* isolate, void* data) {
// static // static
void Sampler::HandleJitCodeEvent(const v8::JitCodeEvent* event) { void Sampler::HandleJitCodeEvent(const v8::JitCodeEvent* event) {
// Called on the sampled V8 thread. // Called on the sampled V8 thread.
Sampler* sampler = GetInstance();
// The sampler may have already been destroyed.
// That's fine, we're not interested in these events anymore.
if (!sampler)
return;
switch (event->type) { switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: case v8::JitCodeEvent::CODE_ADDED:
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"), TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"),
"JitCodeAdded", TRACE_EVENT_SCOPE_THREAD, "data", "JitCodeAdded", TRACE_EVENT_SCOPE_THREAD, "data",
JitCodeEventToTraceFormat(event)); JitCodeEventToTraceFormat(event));
base::subtle::NoBarrier_AtomicIncrement(
&sampler->code_added_events_count_, 1);
break; break;
case v8::JitCodeEvent::CODE_MOVED: case v8::JitCodeEvent::CODE_MOVED:
...@@ -110,9 +276,8 @@ void Sampler::HandleJitCodeEvent(const v8::JitCodeEvent* event) { ...@@ -110,9 +276,8 @@ void Sampler::HandleJitCodeEvent(const v8::JitCodeEvent* event) {
} }
// static // static
scoped_refptr<base::trace_event::ConvertableToTraceFormat> scoped_refptr<ConvertableToTraceFormat> Sampler::JitCodeEventToTraceFormat(
Sampler::JitCodeEventToTraceFormat(const v8::JitCodeEvent* event) { const v8::JitCodeEvent* event) {
// Called on the sampled thread.
switch (event->type) { switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: { case v8::JitCodeEvent::CODE_ADDED: {
scoped_refptr<TracedValue> data(new TracedValue()); scoped_refptr<TracedValue> data(new TracedValue());
...@@ -161,6 +326,13 @@ class V8SamplingThread : public base::PlatformThread::Delegate { ...@@ -161,6 +326,13 @@ class V8SamplingThread : public base::PlatformThread::Delegate {
void RemoveSamplers(); void RemoveSamplers();
void StartSamplers(); void StartSamplers();
void StopSamplers(); void StopSamplers();
static void InstallSignalHandler();
static void RestoreSignalHandler();
#ifdef USE_SIGNALS
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
#endif
static void HandleJitCodeEvent(const v8::JitCodeEvent* event); static void HandleJitCodeEvent(const v8::JitCodeEvent* event);
Sampler* render_thread_sampler_; Sampler* render_thread_sampler_;
...@@ -169,9 +341,19 @@ class V8SamplingThread : public base::PlatformThread::Delegate { ...@@ -169,9 +341,19 @@ class V8SamplingThread : public base::PlatformThread::Delegate {
base::PlatformThreadHandle sampling_thread_handle_; base::PlatformThreadHandle sampling_thread_handle_;
std::vector<Sampler*> samplers_; std::vector<Sampler*> samplers_;
#ifdef USE_SIGNALS
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
#endif
DISALLOW_COPY_AND_ASSIGN(V8SamplingThread); DISALLOW_COPY_AND_ASSIGN(V8SamplingThread);
}; };
#ifdef USE_SIGNALS
bool V8SamplingThread::signal_handler_installed_;
struct sigaction V8SamplingThread::old_signal_handler_;
#endif
V8SamplingThread::V8SamplingThread(Sampler* render_thread_sampler, V8SamplingThread::V8SamplingThread(Sampler* render_thread_sampler,
base::WaitableEvent* event) base::WaitableEvent* event)
: render_thread_sampler_(render_thread_sampler), : render_thread_sampler_(render_thread_sampler),
...@@ -182,15 +364,20 @@ void V8SamplingThread::ThreadMain() { ...@@ -182,15 +364,20 @@ void V8SamplingThread::ThreadMain() {
base::PlatformThread::SetName("V8SamplingProfilerThread"); base::PlatformThread::SetName("V8SamplingProfilerThread");
InstallSamplers(); InstallSamplers();
StartSamplers(); StartSamplers();
InstallSignalHandler();
const int kSamplingFrequencyMicroseconds = 1000; const int kSamplingFrequencyMicroseconds = 1000;
while (!cancellation_flag_.IsSet()) { while (!cancellation_flag_.IsSet()) {
Sample(); Sample();
if (waitable_event_for_testing_) { if (waitable_event_for_testing_ &&
render_thread_sampler_->EventsCollectedForTest()) {
waitable_event_for_testing_->Signal(); waitable_event_for_testing_->Signal();
} }
// TODO(alph): make the samples firing interval not depend on the sample
// taking duration.
base::PlatformThread::Sleep( base::PlatformThread::Sleep(
base::TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds)); base::TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
} }
RestoreSignalHandler();
StopSamplers(); StopSamplers();
RemoveSamplers(); RemoveSamplers();
} }
...@@ -223,9 +410,74 @@ void V8SamplingThread::StopSamplers() { ...@@ -223,9 +410,74 @@ void V8SamplingThread::StopSamplers() {
} }
} }
// static
// Installs the SIGPROF handler used to interrupt profiled threads, saving
// the previous handler so RestoreSignalHandler can put it back.
void V8SamplingThread::InstallSignalHandler() {
#ifdef USE_SIGNALS
  // There must be only one handler installed at a time.
  DCHECK(!signal_handler_installed_);
  // Zero-initialize the whole struct: fields beyond the three set below
  // (e.g. sa_restorer on Linux) must not contain stack garbage when passed
  // to sigaction().
  struct sigaction sa = {};
  sa.sa_sigaction = &HandleProfilerSignal;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  // On failure signal_handler_installed_ stays false and Sample() signals
  // are never restored/uninstalled.
  signal_handler_installed_ =
      (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
#endif
}
// static
// Restores the SIGPROF disposition saved by InstallSignalHandler.
// No-op when installation never succeeded.
void V8SamplingThread::RestoreSignalHandler() {
#ifdef USE_SIGNALS
  if (!signal_handler_installed_)
    return;
  // nullptr instead of 0 for the unused old-action out-parameter, matching
  // the file's convention.
  sigaction(SIGPROF, &old_signal_handler_, nullptr);
  signal_handler_installed_ = false;
#endif
}
#ifdef USE_SIGNALS
// static
// SIGPROF handler executed on the interrupted (profiled) thread.
// Async-signal context: no allocation, locking, or non-async-signal-safe
// library calls are permitted on this path.
void V8SamplingThread::HandleProfilerSignal(int signal,
                                            siginfo_t* info,
                                            void* context) {
  if (signal != SIGPROF)
    return;
  // Pull pc/sp/fp of the interrupted thread out of the signal's ucontext;
  // the layout of uc_mcontext is OS- and architecture-specific.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  v8::RegisterState state;
#if defined(OS_ANDROID)
  // TODO(alph): Add support for Android
  ALLOW_UNUSED_LOCAL(mcontext);
#elif defined(OS_MACOSX)
#if ARCH_CPU_64_BITS
  state.pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
  state.sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
  state.fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
#elif ARCH_CPU_32_BITS
  state.pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
  state.sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
  state.fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
#endif  // ARCH_CPU_32_BITS
#else
#if ARCH_CPU_64_BITS
  state.pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
  state.sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
  state.fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
#elif ARCH_CPU_32_BITS
  state.pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
  state.sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
  state.fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
#endif  // ARCH_CPU_32_BITS
#endif
  // NOTE(review): assumes a Sampler is registered in TLS for this thread;
  // GetInstance() would be null if SIGPROF were delivered to a thread
  // without one — confirm handler install/remove ordering guarantees this.
  Sampler::GetInstance()->DoSample(state);
}
#endif
void V8SamplingThread::Start() { void V8SamplingThread::Start() {
if (!base::PlatformThread::Create(0, this, &sampling_thread_handle_)) { if (!base::PlatformThread::Create(0, this, &sampling_thread_handle_)) {
DCHECK(false) << "failed to create thread"; DCHECK(false) << "failed to create sampling thread";
} }
} }
...@@ -236,7 +488,8 @@ void V8SamplingThread::Stop() { ...@@ -236,7 +488,8 @@ void V8SamplingThread::Stop() {
V8SamplingProfiler::V8SamplingProfiler(bool underTest) V8SamplingProfiler::V8SamplingProfiler(bool underTest)
: sampling_thread_(nullptr), : sampling_thread_(nullptr),
render_thread_sampler_(Sampler::CreateForCurrentThread()) { render_thread_sampler_(Sampler::CreateForCurrentThread()),
message_loop_proxy_(base::MessageLoopProxy::current()) {
DCHECK(underTest || RenderThreadImpl::current()); DCHECK(underTest || RenderThreadImpl::current());
// Force the "v8.cpu_profile" category to show up in the trace viewer. // Force the "v8.cpu_profile" category to show up in the trace viewer.
TraceLog::GetCategoryGroupEnabled( TraceLog::GetCategoryGroupEnabled(
...@@ -249,6 +502,13 @@ V8SamplingProfiler::~V8SamplingProfiler() { ...@@ -249,6 +502,13 @@ V8SamplingProfiler::~V8SamplingProfiler() {
DCHECK(!sampling_thread_.get()); DCHECK(!sampling_thread_.get());
} }
// Creates and starts the dedicated sampling thread. Invoked via the task
// posted from OnTraceLogEnabled on message_loop_proxy_.
void V8SamplingProfiler::StartSamplingThread() {
  DCHECK(!sampling_thread_.get());
  sampling_thread_.reset(new V8SamplingThread(
      render_thread_sampler_.get(), waitable_event_for_testing_.get()));
  sampling_thread_->Start();
}
void V8SamplingProfiler::OnTraceLogEnabled() { void V8SamplingProfiler::OnTraceLogEnabled() {
bool enabled; bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED( TRACE_EVENT_CATEGORY_GROUP_ENABLED(
...@@ -258,15 +518,15 @@ void V8SamplingProfiler::OnTraceLogEnabled() { ...@@ -258,15 +518,15 @@ void V8SamplingProfiler::OnTraceLogEnabled() {
// Do not enable sampling profiler in continuous mode, as losing // Do not enable sampling profiler in continuous mode, as losing
// Jit code events may not be afforded. // Jit code events may not be afforded.
// TODO(alph): add support of infinite recording of meta trace events.
base::trace_event::TraceRecordMode record_mode = base::trace_event::TraceRecordMode record_mode =
TraceLog::GetInstance()->GetCurrentTraceOptions().record_mode; TraceLog::GetInstance()->GetCurrentTraceOptions().record_mode;
if (record_mode == base::trace_event::TraceRecordMode::RECORD_CONTINUOUSLY) if (record_mode == base::trace_event::TraceRecordMode::RECORD_CONTINUOUSLY)
return; return;
DCHECK(!sampling_thread_.get()); message_loop_proxy_->PostTask(
sampling_thread_.reset(new V8SamplingThread( FROM_HERE, base::Bind(&V8SamplingProfiler::StartSamplingThread,
render_thread_sampler_.get(), waitable_event_for_testing_.get())); base::Unretained(this)));
sampling_thread_->Start();
} }
void V8SamplingProfiler::OnTraceLogDisabled() { void V8SamplingProfiler::OnTraceLogDisabled() {
......
...@@ -30,9 +30,12 @@ class CONTENT_EXPORT V8SamplingProfiler final ...@@ -30,9 +30,12 @@ class CONTENT_EXPORT V8SamplingProfiler final
void WaitSamplingEventForTesting(); void WaitSamplingEventForTesting();
private: private:
void StartSamplingThread();
scoped_ptr<base::WaitableEvent> waitable_event_for_testing_; scoped_ptr<base::WaitableEvent> waitable_event_for_testing_;
scoped_ptr<V8SamplingThread> sampling_thread_; scoped_ptr<V8SamplingThread> sampling_thread_;
scoped_ptr<Sampler> render_thread_sampler_; scoped_ptr<Sampler> render_thread_sampler_;
scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
DISALLOW_COPY_AND_ASSIGN(V8SamplingProfiler); DISALLOW_COPY_AND_ASSIGN(V8SamplingProfiler);
}; };
......
...@@ -77,6 +77,39 @@ class V8SamplingProfilerTest : public RenderViewTest { ...@@ -77,6 +77,39 @@ class V8SamplingProfilerTest : public RenderViewTest {
flush_complete_event->Signal(); flush_complete_event->Signal();
} }
  // Enables the v8.cpu_profile category, provokes V8 activity so interrupt
  // callbacks run, waits until the profiler signals that events were
  // collected, then disables tracing and flushes results into trace_parsed_.
  void CollectTrace() {
    TraceLog* trace_log = TraceLog::GetInstance();
    sampling_profiler_->EnableSamplingEventForTesting();
    trace_log->SetEnabled(
        CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
        TraceLog::RECORDING_MODE, TraceOptions());
    base::RunLoop().RunUntilIdle();
    KickV8();  // Make a call to V8 so it can invoke interrupt request
               // callbacks.
    base::RunLoop().RunUntilIdle();
    sampling_profiler_->WaitSamplingEventForTesting();
    trace_log->SetDisabled();
    SyncFlush(trace_log);
  }
int CountEvents(const std::string& name) const {
size_t trace_parsed_count = trace_parsed_.GetSize();
int events_count = 0;
for (size_t i = 0; i < trace_parsed_count; i++) {
const DictionaryValue* dict;
if (!trace_parsed_.GetDictionary(i, &dict))
continue;
std::string value;
if (!dict->GetString("cat", &value) ||
value != TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"))
continue;
if (!dict->GetString("name", &value) || value != name)
continue;
++events_count;
}
return events_count;
}
scoped_ptr<V8SamplingProfiler> sampling_profiler_; scoped_ptr<V8SamplingProfiler> sampling_profiler_;
base::Lock lock_; base::Lock lock_;
...@@ -85,41 +118,38 @@ class V8SamplingProfilerTest : public RenderViewTest { ...@@ -85,41 +118,38 @@ class V8SamplingProfilerTest : public RenderViewTest {
TraceResultBuffer::SimpleOutput json_output_; TraceResultBuffer::SimpleOutput json_output_;
}; };
TEST_F(V8SamplingProfilerTest, V8SamplingEventFired) { // TODO(alph): Implement on Windows and Android
scoped_ptr<V8SamplingProfiler> sampling_profiler( // The SamplingEventForTesting is fired when the framework collected at
new V8SamplingProfiler(true)); // least one JitCodeAdded event and one sample event.
sampling_profiler->EnableSamplingEventForTesting();
#if defined(OS_WIN) || defined(OS_ANDROID)
#define MAYBE(x) DISABLED_##x
#else
#define MAYBE(x) x
#endif
TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingEventFired)) {
sampling_profiler_->EnableSamplingEventForTesting();
TraceLog::GetInstance()->SetEnabled( TraceLog::GetInstance()->SetEnabled(
CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")), CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
TraceLog::RECORDING_MODE, TraceOptions()); TraceLog::RECORDING_MODE, TraceOptions());
sampling_profiler->WaitSamplingEventForTesting(); base::RunLoop().RunUntilIdle();
sampling_profiler_->WaitSamplingEventForTesting();
TraceLog::GetInstance()->SetDisabled(); TraceLog::GetInstance()->SetDisabled();
} }
TEST_F(V8SamplingProfilerTest, V8SamplingJitCodeEventsCollected) { TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingJitCodeEventsCollected)) {
TraceLog* trace_log = TraceLog::GetInstance(); CollectTrace();
trace_log->SetEnabled( int jit_code_added_events_count = CountEvents("JitCodeAdded");
CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
TraceLog::RECORDING_MODE, TraceOptions());
KickV8(); // Make a call to V8 so it can invoke interrupt request callbacks.
trace_log->SetDisabled();
SyncFlush(trace_log);
size_t trace_parsed_count = trace_parsed_.GetSize();
int jit_code_added_events_count = 0;
for (size_t i = 0; i < trace_parsed_count; i++) {
const DictionaryValue* dict;
if (!trace_parsed_.GetDictionary(i, &dict))
continue;
std::string value;
if (!dict->GetString("cat", &value) ||
value != TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"))
continue;
if (!dict->GetString("name", &value) || value != "JitCodeAdded")
continue;
++jit_code_added_events_count;
}
CHECK_LT(0, jit_code_added_events_count); CHECK_LT(0, jit_code_added_events_count);
base::RunLoop().RunUntilIdle(); base::RunLoop().RunUntilIdle();
} }
// Verifies that at least one V8Sample event was recorded while the
// v8.cpu_profile category was enabled.
TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingSamplesCollected)) {
  CollectTrace();
  int sample_events_count = CountEvents("V8Sample");
  CHECK_LT(0, sample_events_count);
  base::RunLoop().RunUntilIdle();
}
} // namespace content } // namespace content
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment