Commit 7e308e6f authored by dskiba, committed by Commit bot

Revert of [tracing] Turn StackFrame into struct. (patchset #5 id:80001 of https://codereview.chromium.org/1891543003/ )

Reason for revert:
Broke build on Windows x64 (warning was treated as error): heap_profiler_allocation_context.cc(58): warning C4267: 'argument': conversion from 'size_t' to 'int', possible loss of data
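
For context, line 58 of the new heap_profiler_allocation_context.cc is almost certainly the |SuperFastHash| call in the |hash<Backtrace>| specialization shown in the diff below, which passes a size_t byte count where the hash function takes an int. A minimal sketch of the explicit narrowing that would silence C4267 on a reland (an assumption for illustration, not the actual follow-up fix):

    size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
      const void* values[Backtrace::kMaxFrameCount];
      for (size_t i = 0; i != backtrace.frame_count; ++i)
        values[i] = backtrace.frames[i].value;
      // The length expression is a size_t, but SuperFastHash takes an int
      // (per the warning above), so narrow it explicitly.
      return base::SuperFastHash(
          reinterpret_cast<const char*>(values),
          static_cast<int>(backtrace.frame_count * sizeof(*values)));
    }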

Original issue's description:
> [tracing] Turn StackFrame into struct.
>
> This change turns StackFrame (aka const char*) into a struct and
> introduces 'type' field which controls how stack frame is formatted
> when it's written to trace file. As an example, thread name, which
> previously was just a string like any other function name, is now
> formatted as '[Thread: %s]'.
>
> More stack frame types will be added in the future, for example
> native allocation tracing will add 'program counter' type.
>
> BUG=602701
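
For reference, the reverted struct was used roughly as in the sketch below, based on the removed code in the header and deduplicator diffs further down (the names in the example are hypothetical):

    // A frame carries a type tag next to its value, so the trace exporter can
    // format thread names differently from trace event names.
    StackFrame thread = StackFrame::FromThreadName("CrBrowserMain");
    StackFrame event = StackFrame::FromTraceEventName("MessageLoop::RunTask");
    // When written to the trace file, THREAD_NAME frames are rendered as
    // "[Thread: CrBrowserMain]", while TRACE_EVENT_NAME frames keep the raw
    // string.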

TBR=primiano@chromium.org,ssid@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=602701

Review URL: https://codereview.chromium.org/1907593002

Cr-Commit-Position: refs/heads/master@{#388558}
parent 57a5db32
@@ -12,26 +12,26 @@
namespace base {
namespace trace_event {
bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
return lhs.value < rhs.value;
}
// Constructor that does not initialize members.
AllocationContext::AllocationContext() {}
bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
return lhs.value == rhs.value;
}
// static
AllocationContext AllocationContext::Empty() {
AllocationContext ctx;
bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
return !(lhs.value == rhs.value);
}
for (size_t i = 0; i < arraysize(ctx.backtrace.frames); i++)
ctx.backtrace.frames[i] = nullptr;
Backtrace::Backtrace(): frame_count(0) {}
ctx.type_name = nullptr;
bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
if (lhs.frame_count != rhs.frame_count) return false;
return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
return ctx;
}
AllocationContext::AllocationContext(): type_name(nullptr) {}
bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
// Pointer equality of the stack frames is assumed, so instead of doing a deep
// string comparison on all of the frames, a |memcmp| suffices.
return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
}
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
@@ -43,19 +43,10 @@ bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
namespace BASE_HASH_NAMESPACE {
using base::trace_event::AllocationContext;
using base::trace_event::Backtrace;
using base::trace_event::StackFrame;
size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
return hash<const void*>()(frame.value);
}
size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
const void* values[Backtrace::kMaxFrameCount];
for (size_t i = 0; i != backtrace.frame_count; ++i) {
values[i] = backtrace.frames[i].value;
}
return base::SuperFastHash(reinterpret_cast<const char*>(values),
backtrace.frame_count * sizeof(*values));
return base::SuperFastHash(reinterpret_cast<const char*>(backtrace.frames),
sizeof(backtrace.frames));
}
size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
......
@@ -35,50 +35,32 @@ namespace trace_event {
//
// See the design doc (https://goo.gl/4s7v7b) for more details.
// Represents (pseudo) stack frame. Used in Backtrace class below.
//
// Conceptually stack frame is identified by its value, and type is used
// mostly to properly format the value. Value is expected to be a valid
// pointer from process' address space.
struct BASE_EXPORT StackFrame {
enum class Type {
TRACE_EVENT_NAME, // const char* string
THREAD_NAME, // const char* thread name
};
static StackFrame FromTraceEventName(const char* name) {
return {Type::TRACE_EVENT_NAME, name};
}
static StackFrame FromThreadName(const char* name) {
return {Type::THREAD_NAME, name};
}
Type type;
const void* value;
};
bool BASE_EXPORT operator < (const StackFrame& lhs, const StackFrame& rhs);
bool BASE_EXPORT operator == (const StackFrame& lhs, const StackFrame& rhs);
bool BASE_EXPORT operator != (const StackFrame& lhs, const StackFrame& rhs);
using StackFrame = const char*;
struct BASE_EXPORT Backtrace {
Backtrace();
// If the stack is higher than what can be stored here, the bottom frames
// (the ones closer to main()) are stored. Based on the data above, a depth
// of 12 captures the full stack in the vast majority of the cases.
enum { kMaxFrameCount = 12 };
StackFrame frames[kMaxFrameCount];
size_t frame_count;
// Unused backtrace frames are filled with nullptr frames. If the stack is
// higher than what can be stored here, the bottom frames are stored. Based
// on the data above, a depth of 12 captures the full stack in the vast
// majority of the cases.
StackFrame frames[12];
};
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
size_t size;
size_t count;
};
bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
// The |AllocationContext| is context metadata that is kept for every allocation
// when heap profiling is enabled. To simplify memory management for book-
// keeping, this struct has a fixed size.
// keeping, this struct has a fixed size. All |const char*|s here must have
// static lifetime.
struct BASE_EXPORT AllocationContext {
AllocationContext();
public:
// An allocation context with empty backtrace and unknown type.
static AllocationContext Empty();
Backtrace backtrace;
@@ -87,27 +69,25 @@ struct BASE_EXPORT AllocationContext {
// deep string comparison. In a component build, where a type name can have a
// string literal in several dynamic libraries, this may distort grouping.
const char* type_name;
private:
friend class AllocationContextTracker;
// Don't allow uninitialized instances except inside the allocation context
// tracker. Except in tests, an |AllocationContext| should only be obtained
// from the tracker. In tests, paying the overhead of initializing the struct
// to |Empty| and then overwriting the members is not such a big deal.
AllocationContext();
};
bool BASE_EXPORT operator==(const AllocationContext& lhs,
const AllocationContext& rhs);
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
size_t size;
size_t count;
};
} // namespace trace_event
} // namespace base
namespace BASE_HASH_NAMESPACE {
template <>
struct BASE_EXPORT hash<base::trace_event::StackFrame> {
size_t operator()(const base::trace_event::StackFrame& frame) const;
};
template <>
struct BASE_EXPORT hash<base::trace_event::Backtrace> {
size_t operator()(const base::trace_event::Backtrace& backtrace) const;
......
@@ -75,18 +75,17 @@ void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
subtle::Release_Store(&capture_enabled_, enabled);
}
void AllocationContextTracker::PushPseudoStackFrame(
const char* trace_event_name) {
void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) {
// Impose a limit on the height to verify that every push is popped, because
// in practice the pseudo stack never grows higher than ~20 frames.
if (pseudo_stack_.size() < kMaxStackDepth)
pseudo_stack_.push_back(trace_event_name);
pseudo_stack_.push_back(frame);
else
NOTREACHED();
}
void AllocationContextTracker::PopPseudoStackFrame(
const char* trace_event_name) {
// static
void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the frame was never pushed, so it is possible that pop is called
// on an empty stack.
@@ -96,7 +95,7 @@ void AllocationContextTracker::PopPseudoStackFrame(
// Assert that pushes and pops are nested correctly. This DCHECK can be
// hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
// without a corresponding TRACE_EVENT_BEGIN).
DCHECK_EQ(trace_event_name, pseudo_stack_.back())
DCHECK_EQ(frame, pseudo_stack_.back())
<< "Encountered an unmatched TRACE_EVENT_END";
pseudo_stack_.pop_back();
@@ -122,22 +121,25 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
// Fill the backtrace.
{
auto backtrace = std::begin(ctx.backtrace.frames);
auto backtrace_end = std::end(ctx.backtrace.frames);
auto src = pseudo_stack_.begin();
auto dst = std::begin(ctx.backtrace.frames);
auto src_end = pseudo_stack_.end();
auto dst_end = std::end(ctx.backtrace.frames);
// Add the thread name as the first entry
// Add the thread name as the first entry in the backtrace.
if (thread_name_) {
*backtrace++ = StackFrame::FromThreadName(thread_name_);
DCHECK(dst < dst_end);
*dst = thread_name_;
++dst;
}
for (const char* event_name: pseudo_stack_) {
if (backtrace == backtrace_end) {
break;
}
*backtrace++ = StackFrame::FromTraceEventName(event_name);
}
// Copy as much of the bottom of the pseudo stack into the backtrace as
// possible.
for (; src != src_end && dst != dst_end; src++, dst++)
*dst = *src;
ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
// If there is room for more, fill the remaining slots with empty frames.
std::fill(dst, dst_end, nullptr);
}
// TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
......
@@ -53,10 +53,10 @@ class BASE_EXPORT AllocationContextTracker {
static void SetCurrentThreadName(const char* name);
// Pushes a frame onto the thread-local pseudo stack.
void PushPseudoStackFrame(const char* trace_event_name);
void PushPseudoStackFrame(StackFrame frame);
// Pops a frame from the thread-local pseudo stack.
void PopPseudoStackFrame(const char* trace_event_name);
void PopPseudoStackFrame(StackFrame frame);
// Push and pop current task's context. A stack is used to support nested
// tasks and the top of the stack will be used in allocation context.
@@ -74,7 +74,7 @@ class BASE_EXPORT AllocationContextTracker {
static subtle::Atomic32 capture_enabled_;
// The pseudo stack where frames are |TRACE_EVENT| names.
std::vector<const char*> pseudo_stack_;
std::vector<StackFrame> pseudo_stack_;
// The thread name is used as the first entry in the pseudo stack.
const char* thread_name_;
......
@@ -33,7 +33,7 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
->GetContextSnapshot();
auto actual = std::begin(ctx.backtrace.frames);
auto actual_bottom = actual + ctx.backtrace.frame_count;
auto actual_bottom = std::end(ctx.backtrace.frames);
auto expected = std::begin(expected_backtrace);
auto expected_bottom = std::end(expected_backtrace);
@@ -53,7 +53,8 @@ void AssertBacktraceEmpty() {
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
ASSERT_EQ(0u, ctx.backtrace.frame_count);
for (StackFrame frame : ctx.backtrace.frames)
ASSERT_EQ(nullptr, frame);
}
class AllocationContextTrackerTest : public testing::Test {
@@ -71,22 +72,24 @@ class AllocationContextTrackerTest : public testing::Test {
};
// Check that |TRACE_EVENT| macros push and pop to the pseudo stack correctly.
// Also check that |GetContextSnapshot| fills the backtrace with null pointers
// when the pseudo stack height is less than the capacity.
TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
StackFrame c = StackFrame::FromTraceEventName(kCupcake);
StackFrame d = StackFrame::FromTraceEventName(kDonut);
StackFrame e = StackFrame::FromTraceEventName(kEclair);
StackFrame f = StackFrame::FromTraceEventName(kFroyo);
StackFrame c = kCupcake;
StackFrame d = kDonut;
StackFrame e = kEclair;
StackFrame f = kFroyo;
AssertBacktraceEmpty();
{
TRACE_EVENT0("Testing", kCupcake);
StackFrame frame_c[] = {c};
StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEquals(frame_c);
{
TRACE_EVENT0("Testing", kDonut);
StackFrame frame_cd[] = {c, d};
StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEquals(frame_cd);
}
@@ -94,7 +97,7 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
{
TRACE_EVENT0("Testing", kEclair);
StackFrame frame_ce[] = {c, e};
StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEquals(frame_ce);
}
@@ -105,7 +108,7 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
{
TRACE_EVENT0("Testing", kFroyo);
StackFrame frame_f[] = {f};
StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEquals(frame_f);
}
@@ -115,15 +118,15 @@ TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
// Same as |PseudoStackScopedTrace|, but now test the |TRACE_EVENT_BEGIN| and
// |TRACE_EVENT_END| macros.
TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
StackFrame c = StackFrame::FromTraceEventName(kCupcake);
StackFrame d = StackFrame::FromTraceEventName(kDonut);
StackFrame e = StackFrame::FromTraceEventName(kEclair);
StackFrame f = StackFrame::FromTraceEventName(kFroyo);
StackFrame c = kCupcake;
StackFrame d = kDonut;
StackFrame e = kEclair;
StackFrame f = kFroyo;
StackFrame frame_c[] = {c};
StackFrame frame_cd[] = {c, d};
StackFrame frame_ce[] = {c, e};
StackFrame frame_f[] = {f};
StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEmpty();
@@ -153,15 +156,15 @@ TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
}
TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
StackFrame c = StackFrame::FromTraceEventName(kCupcake);
StackFrame d = StackFrame::FromTraceEventName(kDonut);
StackFrame e = StackFrame::FromTraceEventName(kEclair);
StackFrame f = StackFrame::FromTraceEventName(kFroyo);
StackFrame c = kCupcake;
StackFrame d = kDonut;
StackFrame e = kEclair;
StackFrame f = kFroyo;
StackFrame frame_c[] = {c};
StackFrame frame_cd[] = {c, d};
StackFrame frame_e[] = {e};
StackFrame frame_ef[] = {e, f};
StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_e[] = {e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
StackFrame frame_ef[] = {e, f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
AssertBacktraceEmpty();
@@ -191,9 +194,6 @@ TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
}
TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
StackFrame c = StackFrame::FromTraceEventName(kCupcake);
StackFrame f = StackFrame::FromTraceEventName(kFroyo);
// Push 12 events onto the pseudo stack.
TRACE_EVENT0("Testing", kCupcake);
TRACE_EVENT0("Testing", kCupcake);
@@ -217,16 +217,16 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
->GetContextSnapshot();
// The pseudo stack relies on pointer equality, not deep string comparisons.
ASSERT_EQ(c, ctx.backtrace.frames[0]);
ASSERT_EQ(f, ctx.backtrace.frames[11]);
ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
}
{
AllocationContext ctx =
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
ASSERT_EQ(c, ctx.backtrace.frames[0]);
ASSERT_EQ(f, ctx.backtrace.frames[11]);
ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
}
}
@@ -239,8 +239,8 @@ TEST_F(AllocationContextTrackerTest, SetCurrentThreadName) {
AllocationContext ctx1 =
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
ASSERT_EQ(StackFrame::FromThreadName(kThread1), ctx1.backtrace.frames[0]);
ASSERT_EQ(StackFrame::FromTraceEventName(kCupcake), ctx1.backtrace.frames[1]);
ASSERT_EQ(kThread1, ctx1.backtrace.frames[0]);
ASSERT_EQ(kCupcake, ctx1.backtrace.frames[1]);
// Test if the thread name is reset.
const char kThread2[] = "thread2";
@@ -248,8 +248,8 @@ TEST_F(AllocationContextTrackerTest, SetCurrentThreadName) {
AllocationContext ctx2 =
AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot();
ASSERT_EQ(StackFrame::FromThreadName(kThread2), ctx2.backtrace.frames[0]);
ASSERT_EQ(StackFrame::FromTraceEventName(kCupcake), ctx2.backtrace.frames[1]);
ASSERT_EQ(kThread2, ctx2.backtrace.frames[0]);
ASSERT_EQ(kCupcake, ctx2.backtrace.frames[1]);
}
TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
......
@@ -57,7 +57,7 @@ size_t SumAllSizes(const AllocationRegister& reg) {
TEST_F(AllocationRegisterTest, InsertRemove) {
AllocationRegister reg;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
// Zero-sized allocations should be discarded.
reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
@@ -91,7 +91,7 @@ TEST_F(AllocationRegisterTest, InsertRemove) {
TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
AllocationRegister reg;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
reg.Insert(reinterpret_cast<void*>(2), 1, ctx);
@@ -106,11 +106,9 @@ TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
// TODO(ruuda): Although double insert happens in practice, it should not.
// Find out the cause and ban double insert if possible.
AllocationRegister reg;
AllocationContext ctx;
StackFrame frame1 = StackFrame::FromTraceEventName("Foo");
StackFrame frame2 = StackFrame::FromTraceEventName("Bar");
ctx.backtrace.frame_count = 1;
AllocationContext ctx = AllocationContext::Empty();
StackFrame frame1 = "Foo";
StackFrame frame2 = "Bar";
ctx.backtrace.frames[0] = frame1;
reg.Insert(reinterpret_cast<void*>(1), 11, ctx);
@@ -140,7 +138,7 @@ TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
size_t expected_sum = 0;
AllocationRegister reg;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
// By inserting 100 more entries than the number of buckets, there will be at
// least 100 collisions.
@@ -177,7 +175,7 @@ TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
size_t expected_sum = 0;
AllocationRegister reg;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
uintptr_t generator = 3;
uintptr_t prime = 1013;
@@ -218,7 +216,7 @@ TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
using Allocation = AllocationRegister::Allocation;
const char kStdString[] = "std::string";
AllocationRegister reg;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
reg.Insert(reinterpret_cast<void*>(17), 1, ctx);
reg.Insert(reinterpret_cast<void*>(19), 2, ctx);
@@ -266,7 +264,7 @@ TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
TEST_F(AllocationRegisterTest, OverflowDeathTest) {
// Use a smaller register to prevent OOM errors on low-end devices.
AllocationRegister reg(static_cast<uint32_t>(GetNumCellsPerPage()));
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
uintptr_t i;
// Fill up all of the memory allocated for the register. |GetNumCells(reg)|
......
@@ -74,19 +74,25 @@ bool operator<(const Bucket& lhs, const Bucket& rhs) {
// returned list will have |backtrace_cursor| advanced or
// |is_broken_down_by_type_name| set depending on the property to group by.
std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
base::hash_map<const void*, Bucket> breakdown;
base::hash_map<const char*, Bucket> breakdown;
if (breakBy == BreakDownMode::kByBacktrace) {
for (const auto& context_and_metrics : bucket.metrics_by_context) {
const Backtrace& backtrace = context_and_metrics.first->backtrace;
const StackFrame* begin = std::begin(backtrace.frames);
const StackFrame* end = begin + backtrace.frame_count;
const StackFrame* cursor = begin + bucket.backtrace_cursor;
const char* const* begin = std::begin(backtrace.frames);
const char* const* end = std::end(backtrace.frames);
const char* const* cursor = begin + bucket.backtrace_cursor;
// The backtrace in the context is padded with null pointers, but these
// should not be considered for breakdown. Adjust end to point past the
// last non-null frame.
while (begin != end && *(end - 1) == nullptr)
end--;
DCHECK_LE(cursor, end);
if (cursor != end) {
Bucket& subbucket = breakdown[cursor->value];
Bucket& subbucket = breakdown[*cursor];
subbucket.size += context_and_metrics.second.size;
subbucket.count += context_and_metrics.second.count;
subbucket.metrics_by_context.push_back(context_and_metrics);
@@ -189,13 +195,13 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
const AllocationContext* context = bucket.metrics_by_context.front().first;
const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
const char* const* backtrace_begin = std::begin(context->backtrace.frames);
const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
Entry entry;
entry.stack_frame_id = stack_frame_deduplicator_->Insert(
backtrace_begin, backtrace_end);
entry.stack_frame_id =
stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
// Deduplicate the type name, or use ID -1 if type name is not set.
entry.type_id = bucket.is_broken_down_by_type_name
......
@@ -22,15 +22,13 @@
namespace {
using base::trace_event::StackFrame;
// Define all strings once, because the deduplicator requires pointer equality,
// and string interning is unreliable.
StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
StackFrame kGetBitmap = StackFrame::FromTraceEventName("GetBitmap");
const char kBrowserMain[] = "BrowserMain";
const char kRendererMain[] = "RendererMain";
const char kCreateWidget[] = "CreateWidget";
const char kInitialize[] = "Initialize";
const char kGetBitmap[] = "GetBitmap";
const char kInt[] = "int";
const char kBool[] = "bool";
@@ -180,10 +178,9 @@ TEST(HeapDumpWriterTest, SizeAndCountAreHexadecimal) {
TEST(HeapDumpWriterTest, BacktraceTypeNameTable) {
hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
ctx.backtrace.frames[0] = kBrowserMain;
ctx.backtrace.frames[1] = kCreateWidget;
ctx.backtrace.frame_count = 2;
ctx.type_name = kInt;
// 10 bytes with context { type: int, bt: [BrowserMain, CreateWidget] }.
@@ -196,7 +193,6 @@ TEST(HeapDumpWriterTest, BacktraceTypeNameTable) {
ctx.backtrace.frames[0] = kRendererMain;
ctx.backtrace.frames[1] = kInitialize;
ctx.backtrace.frame_count = 2;
// 30 bytes with context { type: bool, bt: [RendererMain, Initialize] }.
metrics_by_context[ctx] = {30, 30};
@@ -271,22 +267,19 @@ TEST(HeapDumpWriterTest, BacktraceTypeNameTable) {
TEST(HeapDumpWriterTest, InsignificantValuesNotDumped) {
hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
AllocationContext ctx;
AllocationContext ctx = AllocationContext::Empty();
ctx.backtrace.frames[0] = kBrowserMain;
ctx.backtrace.frames[1] = kCreateWidget;
ctx.backtrace.frame_count = 2;
// 0.5 KiB and 1 chunk in BrowserMain -> CreateWidget itself.
metrics_by_context[ctx] = {512, 1};
// 1 MiB and 1 chunk in BrowserMain -> CreateWidget -> GetBitmap.
ctx.backtrace.frames[2] = kGetBitmap;
ctx.backtrace.frame_count = 3;
metrics_by_context[ctx] = {1024 * 1024, 1};
// 0.5 KiB and 1 chunk in BrowserMain -> CreateWidget -> Initialize.
ctx.backtrace.frames[2] = kInitialize;
ctx.backtrace.frame_count = 3;
metrics_by_context[ctx] = {512, 1};
auto sf_deduplicator = WrapUnique(new StackFrameDeduplicator);
......
@@ -31,7 +31,7 @@ int StackFrameDeduplicator::Insert(const StackFrame* beginFrame,
std::map<StackFrame, int>* nodes = &roots_;
// Loop through the frames, early out when a frame is null.
for (const StackFrame* it = beginFrame; it != endFrame; it++) {
for (const StackFrame* it = beginFrame; it != endFrame && *it; it++) {
StackFrame frame = *it;
auto node = nodes->find(frame);
@@ -78,19 +78,7 @@ void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
out->append(stringify_buffer);
std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
const StackFrame& frame = frame_node->frame;
switch (frame.type) {
case StackFrame::Type::TRACE_EVENT_NAME:
frame_node_value->SetString(
"name", static_cast<const char*>(frame.value));
break;
case StackFrame::Type::THREAD_NAME:
SStringPrintf(&stringify_buffer,
"[Thread: %s]",
static_cast<const char*>(frame.value));
frame_node_value->SetString("name", stringify_buffer);
break;
}
frame_node_value->SetString("name", frame_node->frame);
if (frame_node->parent_frame_index >= 0) {
SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
frame_node_value->SetString("parent", stringify_buffer);
......
@@ -16,11 +16,11 @@ namespace trace_event {
// Define all strings once, because the deduplicator requires pointer equality,
// and string interning is unreliable.
StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
const char kBrowserMain[] = "BrowserMain";
const char kRendererMain[] = "RendererMain";
const char kCreateWidget[] = "CreateWidget";
const char kInitialize[] = "Initialize";
const char kMalloc[] = "malloc";
TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
@@ -47,35 +47,6 @@ TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
ASSERT_EQ(iter + 3, dedup->end());
}
TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
// Deduplicator doesn't care about what's inside StackFrames,
// and handles nullptr StackFrame values as any other.
//
// So the call tree should look like this (index in brackets).
//
// BrowserMain [0]
// (null) [1]
// malloc [2]
std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
auto iter = dedup->begin();
ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
ASSERT_EQ(null_frame, (iter + 1)->frame);
ASSERT_EQ(0, (iter + 1)->parent_frame_index);
ASSERT_EQ(kMalloc, (iter + 2)->frame);
ASSERT_EQ(1, (iter + 2)->parent_frame_index);
ASSERT_EQ(iter + 3, dedup->end());
}
// Test that there can be different call trees (there can be multiple bottom
// frames). Also verify that frames with the same name but a different caller
// are represented as distinct nodes.
@@ -148,5 +119,17 @@ TEST(StackFrameDeduplicatorTest, Deduplication) {
ASSERT_EQ(dedup->begin() + 3, dedup->end());
}
TEST(StackFrameDeduplicatorTest, NullPaddingIsRemoved) {
StackFrame bt0[] = {kBrowserMain, nullptr, nullptr, nullptr};
std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
// There are four frames in the backtrace, but the null pointers should be
// skipped, so only one frame is inserted, which will have index 0.
ASSERT_EQ(4u, arraysize(bt0));
ASSERT_EQ(0, dedup->Insert(std::begin(bt0), std::end(bt0)));
ASSERT_EQ(dedup->begin() + 1, dedup->end());
}
} // namespace trace_event
} // namespace base