Commit cc35222c authored by alph, committed by Commit bot

V8 Sampling Profiler: Collect V8 sample trace events on Linux and MacOS

When the disabled-by-default v8.cpu_profile tracing category is
enabled, the profiler starts generating V8Sample trace events that
carry stack frames and the current V8 state.

LockFreeCircularQueue template is taken from V8.

Android and Windows support will follow.

BUG=406277

Review URL: https://codereview.chromium.org/1017063002

Cr-Commit-Position: refs/heads/master@{#321565}
parent f6dd3c33
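For context, the category (registered as disabled-by-default-v8.cpu_profile in the code below) is a disabled-by-default tracing category. Here is a minimal sketch of driving it through TraceLog, mirroring the tests in this CL; the wrapper function is hypothetical, and the include paths/namespace reflect where tracing lived at the time (an assumption):

```cpp
#include "base/debug/trace_event.h"       // TRACE_DISABLED_BY_DEFAULT
#include "base/debug/trace_event_impl.h"  // TraceLog (assumed pre-move location)

// Hypothetical helper: profile a workload with the V8 sampling profiler.
void TraceV8Samples() {
  using base::debug::CategoryFilter;
  using base::debug::TraceLog;
  using base::debug::TraceOptions;

  // Enabling the category makes the profiler emit V8Sample (and JitCodeAdded)
  // trace events until tracing is disabled again.
  TraceLog::GetInstance()->SetEnabled(
      CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
      TraceLog::RECORDING_MODE, TraceOptions());

  // ... run the JavaScript workload to be profiled ...

  TraceLog::GetInstance()->SetDisabled();
}
```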
@@ -129,6 +129,7 @@
     'renderer/devtools/devtools_agent_filter.h',
     'renderer/devtools/devtools_client.cc',
     'renderer/devtools/devtools_client.h',
+    'renderer/devtools/lock_free_circular_queue.h',
     'renderer/devtools/v8_sampling_profiler.cc',
     'renderer/devtools/v8_sampling_profiler.h',
     'renderer/disambiguation_popup_helper.cc',
...
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_
#define CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_

#include "base/atomicops.h"
#include "base/memory/aligned_memory.h"

#define CACHELINE_ALIGNED ALIGNAS(64)

namespace content {

MSVC_PUSH_DISABLE_WARNING(4324)  // structure was padded due to align

// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
// StartEnqueue will return nullptr. The queue is designed to avoid
// cache-line thrashing by preventing simultaneous reads and writes
// to adjacent memory locations.
template <typename T, unsigned Length>
class LockFreeCircularQueue {
 public:
  // Executed on the application thread.
  LockFreeCircularQueue();
  ~LockFreeCircularQueue();

  // StartEnqueue returns a pointer to a memory location for storing the next
  // record, or nullptr if all entries are full at the moment.
  T* StartEnqueue();
  // Notifies the queue that the producer has completed writing data into the
  // memory returned by StartEnqueue, so it can be passed to the consumer.
  void FinishEnqueue();

  // Executed on the consumer (analyzer) thread.
  // Retrieves, but does not remove, the head of this queue, returning nullptr
  // if this queue is empty. After the record has been read by the consumer,
  // Remove must be called.
  T* Peek();
  void Remove();

  // The class fields have stricter alignment requirements than a normal new
  // can fulfill, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

 private:
  // Reserved values for the entry marker.
  enum {
    kEmpty,  // Marks clean (processed) entries.
    kFull    // Marks entries already filled by the producer but not yet
             // completely processed by the consumer.
  };

  struct CACHELINE_ALIGNED Entry {
    Entry() : marker(kEmpty) {}
    T record;
    base::subtle::Atomic32 marker;
  };

  Entry* Next(Entry* entry);

  Entry buffer_[Length];
  CACHELINE_ALIGNED Entry* enqueue_pos_;
  CACHELINE_ALIGNED Entry* dequeue_pos_;

  DISALLOW_COPY_AND_ASSIGN(LockFreeCircularQueue);
};
MSVC_POP_WARNING()

template <typename T, unsigned L>
LockFreeCircularQueue<T, L>::LockFreeCircularQueue()
    : enqueue_pos_(buffer_), dequeue_pos_(buffer_) {
}

template <typename T, unsigned L>
LockFreeCircularQueue<T, L>::~LockFreeCircularQueue() {
}

template <typename T, unsigned L>
T* LockFreeCircularQueue<T, L>::Peek() {
  base::subtle::MemoryBarrier();
  if (base::subtle::Acquire_Load(&dequeue_pos_->marker) == kFull) {
    return &dequeue_pos_->record;
  }
  return nullptr;
}

template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::Remove() {
  base::subtle::Release_Store(&dequeue_pos_->marker, kEmpty);
  dequeue_pos_ = Next(dequeue_pos_);
}

template <typename T, unsigned L>
T* LockFreeCircularQueue<T, L>::StartEnqueue() {
  base::subtle::MemoryBarrier();
  if (base::subtle::Acquire_Load(&enqueue_pos_->marker) == kEmpty) {
    return &enqueue_pos_->record;
  }
  return nullptr;
}

template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::FinishEnqueue() {
  base::subtle::Release_Store(&enqueue_pos_->marker, kFull);
  enqueue_pos_ = Next(enqueue_pos_);
}

template <typename T, unsigned L>
typename LockFreeCircularQueue<T, L>::Entry* LockFreeCircularQueue<T, L>::Next(
    Entry* entry) {
  Entry* next = entry + 1;
  if (next == &buffer_[L])
    return buffer_;
  return next;
}

template <typename T, unsigned L>
void* LockFreeCircularQueue<T, L>::operator new(size_t size) {
  typedef LockFreeCircularQueue<T, L> QueueTypeAlias;
  return base::AlignedAlloc(size, ALIGNOF(QueueTypeAlias));
}

template <typename T, unsigned L>
void LockFreeCircularQueue<T, L>::operator delete(void* ptr) {
  base::AlignedFree(ptr);
}

}  // namespace content
#endif // CONTENT_RENDERER_DEVTOOLS_LOCK_FREE_CIRCULAR_QUEUE_H_
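Taken together, the API above gives a single-producer/single-consumer ring: the producer claims a slot with StartEnqueue and publishes it with FinishEnqueue, while the consumer reads the head record in place via Peek and releases the slot with Remove. A minimal usage sketch follows; the Sample record and both helper functions are hypothetical, purely to illustrate the protocol:

```cpp
#include "content/renderer/devtools/lock_free_circular_queue.h"

// Hypothetical record type; in this CL the real payload is a V8 sample.
struct Sample {
  int frame_count;
};

using SampleQueue = content::LockFreeCircularQueue<Sample, 64>;

// Producer side (e.g. the sampling signal handler): claim, fill, publish.
void ProduceSample(SampleQueue* queue) {
  Sample* slot = queue->StartEnqueue();
  if (!slot)
    return;  // Queue is full; drop the sample instead of blocking.
  slot->frame_count = 1;
  queue->FinishEnqueue();  // Release-stores kFull so the consumer sees the record.
}

// Consumer side (the analyzer thread): read the head in place, then release it.
void DrainSamples(SampleQueue* queue) {
  while (Sample* sample = queue->Peek()) {
    // ... serialize *sample into a trace event ...
    queue->Remove();  // Release-stores kEmpty, recycling the slot.
  }
}
```

Because every Entry is cache-line aligned (CACHELINE_ALIGNED) and the enqueue/dequeue cursors each occupy their own cache line, the producer and consumer never write to adjacent memory locations, which is exactly the thrashing the class comment warns about.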
@@ -30,9 +30,12 @@ class CONTENT_EXPORT V8SamplingProfiler final
   void WaitSamplingEventForTesting();

  private:
+  void StartSamplingThread();
+
   scoped_ptr<base::WaitableEvent> waitable_event_for_testing_;
   scoped_ptr<V8SamplingThread> sampling_thread_;
   scoped_ptr<Sampler> render_thread_sampler_;
+  scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;

   DISALLOW_COPY_AND_ASSIGN(V8SamplingProfiler);
 };
...
@@ -77,6 +77,39 @@ class V8SamplingProfilerTest : public RenderViewTest {
     flush_complete_event->Signal();
   }

+  void CollectTrace() {
+    TraceLog* trace_log = TraceLog::GetInstance();
+    sampling_profiler_->EnableSamplingEventForTesting();
+    trace_log->SetEnabled(
+        CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
+        TraceLog::RECORDING_MODE, TraceOptions());
+    base::RunLoop().RunUntilIdle();
+    // Make a call to V8 so it can invoke interrupt request callbacks.
+    KickV8();
+    base::RunLoop().RunUntilIdle();
+    sampling_profiler_->WaitSamplingEventForTesting();
+    trace_log->SetDisabled();
+    SyncFlush(trace_log);
+  }
+
+  int CountEvents(const std::string& name) const {
+    size_t trace_parsed_count = trace_parsed_.GetSize();
+    int events_count = 0;
+    for (size_t i = 0; i < trace_parsed_count; i++) {
+      const DictionaryValue* dict;
+      if (!trace_parsed_.GetDictionary(i, &dict))
+        continue;
+      std::string value;
+      if (!dict->GetString("cat", &value) ||
+          value != TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"))
+        continue;
+      if (!dict->GetString("name", &value) || value != name)
+        continue;
+      ++events_count;
+    }
+    return events_count;
+  }
+
   scoped_ptr<V8SamplingProfiler> sampling_profiler_;
   base::Lock lock_;
...@@ -85,41 +118,38 @@ class V8SamplingProfilerTest : public RenderViewTest { ...@@ -85,41 +118,38 @@ class V8SamplingProfilerTest : public RenderViewTest {
TraceResultBuffer::SimpleOutput json_output_; TraceResultBuffer::SimpleOutput json_output_;
}; };
TEST_F(V8SamplingProfilerTest, V8SamplingEventFired) { // TODO(alph): Implement on Windows and Android
scoped_ptr<V8SamplingProfiler> sampling_profiler( // The SamplingEventForTesting is fired when the framework collected at
new V8SamplingProfiler(true)); // least one JitCodeAdded event and one sample event.
sampling_profiler->EnableSamplingEventForTesting();
#if defined(OS_WIN) || defined(OS_ANDROID)
#define MAYBE(x) DISABLED_##x
#else
#define MAYBE(x) x
#endif
TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingEventFired)) {
sampling_profiler_->EnableSamplingEventForTesting();
TraceLog::GetInstance()->SetEnabled( TraceLog::GetInstance()->SetEnabled(
CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")), CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
TraceLog::RECORDING_MODE, TraceOptions()); TraceLog::RECORDING_MODE, TraceOptions());
sampling_profiler->WaitSamplingEventForTesting(); base::RunLoop().RunUntilIdle();
sampling_profiler_->WaitSamplingEventForTesting();
TraceLog::GetInstance()->SetDisabled(); TraceLog::GetInstance()->SetDisabled();
} }
TEST_F(V8SamplingProfilerTest, V8SamplingJitCodeEventsCollected) { TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingJitCodeEventsCollected)) {
TraceLog* trace_log = TraceLog::GetInstance(); CollectTrace();
trace_log->SetEnabled( int jit_code_added_events_count = CountEvents("JitCodeAdded");
CategoryFilter(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile")),
TraceLog::RECORDING_MODE, TraceOptions());
KickV8(); // Make a call to V8 so it can invoke interrupt request callbacks.
trace_log->SetDisabled();
SyncFlush(trace_log);
size_t trace_parsed_count = trace_parsed_.GetSize();
int jit_code_added_events_count = 0;
for (size_t i = 0; i < trace_parsed_count; i++) {
const DictionaryValue* dict;
if (!trace_parsed_.GetDictionary(i, &dict))
continue;
std::string value;
if (!dict->GetString("cat", &value) ||
value != TRACE_DISABLED_BY_DEFAULT("v8.cpu_profile"))
continue;
if (!dict->GetString("name", &value) || value != "JitCodeAdded")
continue;
++jit_code_added_events_count;
}
CHECK_LT(0, jit_code_added_events_count); CHECK_LT(0, jit_code_added_events_count);
base::RunLoop().RunUntilIdle(); base::RunLoop().RunUntilIdle();
} }
TEST_F(V8SamplingProfilerTest, MAYBE(V8SamplingSamplesCollected)) {
CollectTrace();
int sample_events_count = CountEvents("V8Sample");
CHECK_LT(0, sample_events_count);
base::RunLoop().RunUntilIdle();
}
} // namespace content } // namespace content