Commit 77f34eb1 authored by Alexei Filippov, committed by Commit Bot

[heap profiler] Drop multithreading support from BacktraceStorage.

The BacktraceStorage object's lifespan is local to ConnectionManager::HeapProfileRetrieved,
and it is accessed from a single thread.
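For context, after this change the storage degenerates to a plain std::unordered_set kept on the stack of HeapProfileRetrieved (see the backtrace.h diff below). A minimal sketch of the resulting single-threaded pattern, using a simplified stand-in Backtrace and a toy hash combiner in place of base::Hash:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_set>
#include <utility>
#include <vector>

using Address = uint64_t;

// Simplified stand-in for heap_profiling::Backtrace: move-only, with the
// hash precomputed at construction so de-duping never rehashes the frames.
struct Backtrace {
  explicit Backtrace(std::vector<Address>&& a) : addrs(std::move(a)) {
    for (Address addr : addrs)  // Toy combiner; the real code uses base::Hash.
      hash = hash * 31 + std::hash<Address>()(addr);
  }
  Backtrace(Backtrace&&) noexcept = default;
  Backtrace(const Backtrace&) = delete;
  bool operator==(const Backtrace& o) const { return addrs == o.addrs; }

  std::vector<Address> addrs;
  size_t hash = 0;
};

namespace std {
template <>
struct hash<Backtrace> {
  size_t operator()(const Backtrace& b) const { return b.hash; }
};
}  // namespace std

// With multithreading support gone, the "storage" is just a set.
using BacktraceStorage = std::unordered_set<Backtrace>;

int main() {
  BacktraceStorage storage;  // Lives only for the duration of one dump.
  std::vector<Address> stack{0x5678, 0x1234};
  // insert() de-dupes; element addresses in an unordered_set are stable,
  // so the raw pointer stays valid until |storage| itself goes away.
  const Backtrace* bt = &*storage.insert(Backtrace(std::move(stack))).first;
  std::printf("%zu frames\n", bt->addrs.size());
}

Because std::unordered_set never relocates its elements, the raw Backtrace* keys handed to AllocationEvent stay stable without any locking or refcounting.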

BUG=923459

Change-Id: I97dfa2642d5efd06461ea69cb833d4a6aba1083d
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1574942
Commit-Queue: Alexei Filippov <alph@chromium.org>
Reviewed-by: Erik Chen <erikchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#652639}
parent 7a7d5673
......@@ -4,13 +4,10 @@
static_library("heap_profiling") {
sources = [
"address.h",
"allocation_event.cc",
"allocation_event.h",
"backtrace.cc",
"backtrace.h",
"backtrace_storage.cc",
"backtrace_storage.h",
"connection_manager.cc",
"connection_manager.h",
"heap_profiling_service.cc",
......@@ -29,7 +26,7 @@ static_library("heap_profiling") {
source_set("unit_tests") {
testonly = true
sources = [
"backtrace_storage_unittest.cc",
"backtrace_unittest.cc",
"json_exporter_unittest.cc",
]
deps = [
......
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_SERVICES_HEAP_PROFILING_ADDRESS_H_
#define COMPONENTS_SERVICES_HEAP_PROFILING_ADDRESS_H_
#include <stdint.h>
#include <functional>
#include <iosfwd>
#include "base/hash/hash.h"
namespace heap_profiling {
// Wrapper around an address in the instrumented process. This wrapper should
// be a zero-overhead abstraction around a 64-bit integer (so pass by value)
// that prevents getting confused between addresses in the local process and
// ones in the instrumented process.
struct Address {
Address() : value(0) {}
explicit Address(uint64_t v) : value(v) {}
uint64_t value;
bool operator<(Address other) const { return value < other.value; }
bool operator<=(Address other) const { return value <= other.value; }
bool operator>(Address other) const { return value > other.value; }
bool operator>=(Address other) const { return value >= other.value; }
bool operator==(Address other) const { return value == other.value; }
bool operator!=(Address other) const { return value != other.value; }
Address operator+(int64_t delta) const { return Address(value + delta); }
Address operator+=(int64_t delta) {
value += delta;
return *this;
}
Address operator-(int64_t delta) const { return Address(value - delta); }
Address operator-=(int64_t delta) {
value -= delta;
return *this;
}
int64_t operator-(Address a) const { return value - a.value; }
};
} // namespace heap_profiling
namespace std {
template <>
struct hash<heap_profiling::Address> {
typedef heap_profiling::Address argument_type;
typedef uint32_t result_type;
result_type operator()(argument_type a) const {
return base::Hash(&a.value, sizeof(int64_t));
}
};
} // namespace std
#endif // COMPONENTS_SERVICES_HEAP_PROFILING_ADDRESS_H_
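The std::hash specialization above is what lets Address key unordered containers directly. A self-contained sketch of the wrapper in use, with base::Hash swapped for std::hash<uint64_t> so it builds outside a Chromium checkout (this CL goes on to replace the struct with a plain uint64_t alias in backtrace.h):

#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>

// Trimmed copy of the wrapper above; std::hash<uint64_t> replaces
// base::Hash so the sketch compiles standalone.
struct Address {
  Address() : value(0) {}
  explicit Address(uint64_t v) : value(v) {}
  uint64_t value;
  bool operator==(Address other) const { return value == other.value; }
  Address operator+=(int64_t delta) {
    value += delta;
    return *this;
  }
};

namespace std {
template <>
struct hash<Address> {
  size_t operator()(Address a) const { return hash<uint64_t>()(a.value); }
};
}  // namespace std

int main() {
  // Usable as a hash-map key like a raw integer, but the explicit
  // constructor keeps local-process pointers from sneaking in.
  std::unordered_map<Address, std::string> symbols;
  symbols[Address(0x1234)] = "main";
  Address pc(0x1230);
  pc += 4;  // Pointer-style arithmetic via the overloaded operators.
  std::printf("%s\n", symbols.count(pc) ? "hit" : "miss");  // Prints "hit".
}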
......@@ -9,8 +9,7 @@
#include <map>
#include <unordered_set>
#include "components/services/heap_profiling/address.h"
#include "components/services/heap_profiling/backtrace_storage.h"
#include "components/services/heap_profiling/backtrace.h"
#include "components/services/heap_profiling/public/mojom/heap_profiling_client.mojom.h"
namespace heap_profiling {
......@@ -59,7 +58,7 @@ class AllocationEvent {
struct EqualityByAddress {
bool operator()(const AllocationEvent& lhs,
const AllocationEvent& rhs) const {
return lhs.address().value == rhs.address().value;
return lhs.address() == rhs.address();
}
};
......
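EqualityByAddress is a stateless comparator for containers that key allocation events by address alone. A minimal sketch of how such a functor pairs with std::unordered_set; Event and HashByAddress here are hypothetical stand-ins for illustration, not the real AllocationEvent API:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_set>

// Hypothetical stand-ins for AllocationEvent and its hash functor.
struct Event {
  uint64_t address;
  uint64_t size;  // Ignored by address-keyed containers.
};

struct HashByAddress {
  size_t operator()(const Event& e) const {
    return std::hash<uint64_t>()(e.address);
  }
};

struct EqualityByAddress {
  bool operator()(const Event& a, const Event& b) const {
    return a.address == b.address;
  }
};

int main() {
  std::unordered_set<Event, HashByAddress, EqualityByAddress> live;
  live.insert({0x1000, 16});
  // A second event at the same address is a duplicate key: not inserted.
  bool inserted = live.insert({0x1000, 32}).second;
  return inserted ? 1 : 0;  // Returns 0.
}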
......@@ -4,33 +4,29 @@
#include "components/services/heap_profiling/backtrace.h"
#include <string.h>
#include <algorithm>
#include <cstring>
#include <utility>
#include "base/hash/hash.h"
#include "components/services/heap_profiling/backtrace_storage.h"
namespace heap_profiling {
namespace {
// TODO(ajwong): Replace with a fingerprint-capable hash.
size_t ComputeHash(const std::vector<Address>& addrs) {
if (addrs.empty())
return 0;
// Assume Address is a POD containing only the address with no padding.
static_assert(std::is_integral<Address>::value,
"base::Hash call below needs simple type.");
return base::Hash(addrs.data(), addrs.size() * sizeof(Address));
}
} // namespace
Backtrace::Backtrace(std::vector<Address>&& a)
: addrs_(std::move(a)), fingerprint_(ComputeHash(addrs_)) {}
: addrs_(std::move(a)), hash_(ComputeHash(addrs_)) {}
Backtrace::Backtrace(Backtrace&& other) noexcept = default;
Backtrace::~Backtrace() {}
Backtrace::~Backtrace() = default;
Backtrace& Backtrace::operator=(Backtrace&& other) = default;
......
......@@ -6,29 +6,21 @@
#define COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_H_
#include <functional>
#include <unordered_set>
#include <vector>
#include "base/macros.h"
#include "components/services/heap_profiling/address.h"
namespace heap_profiling {
using Address = uint64_t;
// Holds a move-only stack backtrace and a precomputed hash. This backtrace
// uses addresses in the instrumented process. This is in contrast to
// base::StackTrace which is for getting and working with stack traces in the
// current process.
//
// This is immutable since we assume it can be read from multiple threads
// without locking.
//
// This class has a ref_count member which is used by the allocation tracker
// to track references to the stack. The reference counting is managed
// externally. Tracking live objects with a global atom list in a threadsafe
// manner is much more difficult if this class derives from RefCount.
class Backtrace {
public:
// Move-only class. Backtraces should be managed by BacktraceStorage and
// we shouldn't be copying vectors around.
explicit Backtrace(std::vector<Address>&& a);
Backtrace(Backtrace&& other) noexcept;
~Backtrace();
......@@ -39,27 +31,17 @@ class Backtrace {
bool operator!=(const Backtrace& other) const;
const std::vector<Address>& addrs() const { return addrs_; }
size_t fingerprint() const { return fingerprint_; }
size_t hash() const { return hash_; }
private:
friend class BacktraceStorage; // Only BacktraceStorage can do ref counting.
// The reference counting is not threadsafe. It's assumed the
// BacktraceStorage is the only class accessing this, and it's done inside a
// lock.
void AddRef() const { ref_count_++; }
bool Release() const { // Returns whether the result is non-zero.
return !!(--ref_count_);
}
std::vector<Address> addrs_;
size_t fingerprint_;
mutable int ref_count_ = 0;
size_t hash_;
DISALLOW_COPY_AND_ASSIGN(Backtrace);
};
using BacktraceStorage = std::unordered_set<Backtrace>;
} // namespace heap_profiling
namespace std {
......@@ -68,9 +50,7 @@ template <>
struct hash<heap_profiling::Backtrace> {
using argument_type = heap_profiling::Backtrace;
using result_type = size_t;
result_type operator()(const argument_type& s) const {
return s.fingerprint();
}
result_type operator()(const argument_type& s) const { return s.hash(); }
};
} // namespace std
......
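Replacing the class with a bare std::unordered_set is sound because the standard guarantees that pointers and references to elements of unordered containers survive insert() and rehashing; only iterators are invalidated. A quick self-contained demonstration of the property the profiler relies on:

#include <cassert>
#include <cstdint>
#include <unordered_set>

int main() {
  std::unordered_set<uint64_t> set;
  set.insert(42);
  const uint64_t* stable = &*set.find(42);
  // Force repeated growth and rehashing; element addresses must survive.
  for (uint64_t i = 0; i < 100000; ++i)
    set.insert(i);
  assert(stable == &*set.find(42) && *stable == 42);
  return 0;
}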
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/services/heap_profiling/backtrace_storage.h"
#include "base/logging.h"
#include "components/services/heap_profiling/backtrace.h"
namespace heap_profiling {
namespace {
constexpr size_t kShardCount = 64;
} // namespace
BacktraceStorage::Lock::Lock() : storage_(nullptr) {}
BacktraceStorage::Lock::Lock(BacktraceStorage* storage) : storage_(storage) {
storage_->LockStorage();
}
BacktraceStorage::Lock::Lock(Lock&& other) : storage_(other.storage_) {
other.storage_ = nullptr; // Prevent the other from unlocking.
}
BacktraceStorage::Lock::~Lock() {
if (storage_)
storage_->UnlockStorage();
}
BacktraceStorage::Lock& BacktraceStorage::Lock::operator=(Lock&& other) {
if (storage_)
storage_->UnlockStorage();
storage_ = other.storage_;
other.storage_ = nullptr;
return *this;
}
bool BacktraceStorage::Lock::IsLocked() {
return storage_ != nullptr;
}
BacktraceStorage::BacktraceStorage() : shards_(kShardCount) {}
BacktraceStorage::~BacktraceStorage() {}
const Backtrace* BacktraceStorage::Insert(std::vector<Address>&& bt) {
Backtrace backtrace(std::move(bt));
size_t shard_index = backtrace.fingerprint() % kShardCount;
ContainerShard& shard = shards_[shard_index];
base::AutoLock lock(shard.lock);
auto iter = shard.backtraces.insert(std::move(backtrace)).first;
iter->AddRef();
return &*iter;
}
void BacktraceStorage::Free(const Backtrace* bt) {
size_t shard_index = bt->fingerprint() % kShardCount;
ContainerShard& shard = shards_[shard_index];
base::AutoLock lock(shard.lock);
if (shard.consumer_count) {
shard.release_after_lock.push_back(bt);
} else {
if (!bt->Release())
shard.backtraces.erase(*bt);
}
}
void BacktraceStorage::Free(const std::vector<const Backtrace*>& bts) {
// Separate backtraces by shard using the fingerprint.
std::vector<const Backtrace*> backtraces_by_shard[kShardCount];
for (size_t i = 0; i < kShardCount; ++i) {
backtraces_by_shard[i].reserve(bts.size() / kShardCount + 1);
}
for (const Backtrace* bt : bts) {
size_t shard_index = bt->fingerprint() % kShardCount;
backtraces_by_shard[shard_index].push_back(bt);
}
for (size_t i = 0; i < kShardCount; ++i) {
ContainerShard& shard = shards_[i];
base::AutoLock lock(shard.lock);
if (shard.consumer_count) {
shard.release_after_lock.insert(shard.release_after_lock.end(),
backtraces_by_shard[i].begin(),
backtraces_by_shard[i].end());
} else {
ReleaseBacktracesLocked(backtraces_by_shard[i], i);
}
}
}
void BacktraceStorage::LockStorage() {
for (size_t i = 0; i < kShardCount; ++i) {
base::AutoLock lock(shards_[i].lock);
shards_[i].consumer_count++;
}
}
void BacktraceStorage::UnlockStorage() {
for (size_t i = 0; i < kShardCount; ++i) {
ContainerShard& shard = shards_[i];
base::AutoLock lock(shard.lock);
DCHECK(shard.consumer_count > 0);
shard.consumer_count--;
if (shard.consumer_count == 0) {
ReleaseBacktracesLocked(shard.release_after_lock, i);
shard.release_after_lock.clear();
shard.release_after_lock.shrink_to_fit();
}
}
}
void BacktraceStorage::ReleaseBacktracesLocked(
const std::vector<const Backtrace*>& bts,
size_t shard_index) {
ContainerShard& shard = shards_[shard_index];
shard.lock.AssertAcquired();
DCHECK_EQ(0, shard.consumer_count);
for (const Backtrace* bt : bts) {
if (!bt->Release())
shard.backtraces.erase(*bt);
}
}
BacktraceStorage::ContainerShard::ContainerShard() = default;
BacktraceStorage::ContainerShard::~ContainerShard() = default;
} // namespace heap_profiling
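For reference, the scheme deleted above: each backtrace maps to one of 64 shards by its hash, so concurrent callers usually contend on different locks, and raw element pointers can be handed out because unordered_set storage is stable. A stripped-down sketch of the pattern, with std::mutex standing in for base::Lock, size_t keys for Backtrace, and the refcounting and deferred-release queue omitted:

#include <cstddef>
#include <mutex>
#include <unordered_set>

class ShardedSet {
 public:
  const size_t* Insert(size_t value) {
    Shard& shard = shards_[value % kShardCount];
    std::lock_guard<std::mutex> lock(shard.mutex);
    // unordered_set element addresses are stable, so a raw pointer can be
    // returned and used after the shard lock is dropped.
    return &*shard.values.insert(value).first;
  }

 private:
  static constexpr size_t kShardCount = 64;
  struct Shard {
    std::mutex mutex;
    std::unordered_set<size_t> values;
  };
  Shard shards_[kShardCount];
};

int main() {
  ShardedSet set;
  const size_t* p = set.Insert(123);  // De-duped, shard-locked insert.
  return *p == 123 ? 0 : 1;
}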
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_STORAGE_H_
#define COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_STORAGE_H_
#include <unordered_set>
#include <vector>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "components/services/heap_profiling/backtrace.h"
namespace heap_profiling {
// Backtraces are stored effectively as atoms, and this class is the backing
// store for the atoms. When you insert a backtrace, it will get de-duped with
// existing ones, one refcount added, and returned. When you're done with a
// backtrace, call Free() which will release the refcount. This may or may not
// release the underlying Backtrace itself, depending on whether other refs are
// held.
//
// This class is threadsafe.
class BacktraceStorage {
public:
// Instantiating this lock will prevent backtraces from being deleted from
// the storage for as long as it's alive. This class is movable but not
// copyable.
class Lock {
public:
Lock(); // Doesn't take the lock.
explicit Lock(BacktraceStorage* storage); // Takes the lock.
Lock(const Lock&) = delete;
Lock(Lock&&);
~Lock();
Lock& operator=(Lock&& other);
Lock& operator=(const Lock&) = delete;
bool IsLocked();
private:
BacktraceStorage* storage_; // May be null if moved from.
};
BacktraceStorage();
~BacktraceStorage();
// Adds the given backtrace to the storage and returns a key to it. If a
// matching backtrace already exists, a key to the existing one will be
// returned.
//
// The returned key will have a reference count associated with it; call
// Free when the key is no longer needed.
const Backtrace* Insert(std::vector<Address>&& bt);
// Frees one reference to a backtrace.
void Free(const Backtrace* bt);
void Free(const std::vector<const Backtrace*>& bts);
private:
friend Lock;
using Container = std::unordered_set<Backtrace>;
// Called by the BacktraceStorage::Lock class.
void LockStorage();
void UnlockStorage();
// Releases all backtraces in the vector assuming |lock| is already held
// and |consumer_count| is zero.
void ReleaseBacktracesLocked(const std::vector<const Backtrace*>& bts,
size_t shard_index);
struct ContainerShard {
ContainerShard();
~ContainerShard();
// Container of de-duped, live backtraces. All modifications to |backtraces|
// or the Backtrace elements owned by |backtraces| must be protected by
// |lock|.
Container backtraces;
mutable base::Lock lock;
// Protected by |lock|. This indicates the number of consumers that have
// raw backtrace pointers owned by |backtraces|. As long as this count is
// non-zero, Backtraces owned by |backtraces| cannot be modified or
// destroyed. Elements can be inserted into |backtraces| even when this is
// non-zero because existing raw backtrace pointers are stable.
int consumer_count = 0;
// When |consumer_count| is non-zero, no backtraces will be deleted from
// the storage. Instead, they are accumulated here and released once
// |consumer_count| drops back to zero.
std::vector<const Backtrace*> release_after_lock;
DISALLOW_COPY_AND_ASSIGN(ContainerShard);
};
// Backtraces are sharded by fingerprint to reduce lock contention.
std::vector<ContainerShard> shards_;
DISALLOW_COPY_AND_ASSIGN(BacktraceStorage);
};
} // namespace heap_profiling
#endif // COMPONENTS_SERVICES_HEAP_PROFILING_BACKTRACE_STORAGE_H_
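The Lock class above is a move-only RAII guard: while any instance is alive, every shard's consumer_count is non-zero, so Free() defers releases into release_after_lock rather than erasing. A sketch of the calling pattern this CL deletes; it builds only inside a Chromium checkout, against the header above:

#include <vector>

#include "components/services/heap_profiling/backtrace_storage.h"

namespace heap_profiling {

void DumpSketch() {
  BacktraceStorage storage;
  const Backtrace* bt =
      storage.Insert(std::vector<Address>{Address(0x1234), Address(0x5678)});
  {
    BacktraceStorage::Lock lock(&storage);  // consumer_count++ on all shards.
    storage.Free(bt);  // Deferred: queued on release_after_lock for now.
    // |bt| may still be dereferenced safely here.
  }  // ~Lock(): consumer_count--; queued releases are applied at zero.
}

}  // namespace heap_profiling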
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/services/heap_profiling/backtrace_storage.h"
#include "components/services/heap_profiling/backtrace.h"
#include <vector>
......@@ -21,7 +21,7 @@ TEST(BacktraceStorage, KeyStability) {
// Each backtrace should contain its index as the only stack entry.
std::vector<Address> addrs;
addrs.push_back(Address(i));
traces.push_back(storage.Insert(std::move(addrs)));
traces.push_back(&*storage.insert(Backtrace(std::move(addrs))).first);
}
// Validate the backtraces are still valid.
......
......@@ -18,11 +18,6 @@ const size_t kMinSizeThreshold = 16 * 1024;
const size_t kMinCountThreshold = 1024;
} // namespace
ConnectionManager::DumpArgs::DumpArgs() = default;
ConnectionManager::DumpArgs::DumpArgs(DumpArgs&& other) noexcept
: backtrace_storage_lock(std::move(other.backtrace_storage_lock)) {}
ConnectionManager::DumpArgs::~DumpArgs() = default;
// Tracking information for DumpProcessForTracing(). This struct is
// refcounted since there will be many background thread calls (one for each
// AllocationTracker) and the callback is only issued when each has
......@@ -31,10 +26,7 @@ ConnectionManager::DumpArgs::~DumpArgs() = default;
// This class is not threadsafe; its members must only be accessed on the
// I/O thread.
struct ConnectionManager::DumpProcessesForTracingTracking
: public ConnectionManager::DumpArgs,
public base::RefCountedThreadSafe<DumpProcessesForTracingTracking> {
DumpProcessesForTracingTracking() = default;
: public base::RefCountedThreadSafe<DumpProcessesForTracingTracking> {
// Number of processes we're still waiting on responses for. When this gets
// to 0, the callback will be issued.
size_t waiting_responses = 0;
......@@ -174,8 +166,6 @@ void ConnectionManager::DumpProcessesForTracing(
}
auto tracking = base::MakeRefCounted<DumpProcessesForTracingTracking>();
tracking->backtrace_storage_lock =
BacktraceStorage::Lock(&backtrace_storage_);
tracking->waiting_responses = connections_.size();
tracking->callback = std::move(callback);
tracking->vm_regions = std::move(vm_regions);
......@@ -202,7 +192,6 @@ void ConnectionManager::HeapProfileRetrieved(
ContextMap context_map;
AddressToStringMap string_map;
BacktraceStorage backtrace_storage;
BacktraceStorage::Lock backtrace_storage_lock(&backtrace_storage);
bool success = true;
for (const mojom::HeapProfileSamplePtr& sample : profile->samples) {
......@@ -223,8 +212,9 @@ void ConnectionManager::HeapProfileRetrieved(
static_cast<int>(context_map.size() + 1))
.first->second;
}
const Backtrace* backtrace = backtrace_storage.Insert(
std::vector<Address>(sample->stack.begin(), sample->stack.end()));
std::vector<Address> stack(sample->stack.begin(), sample->stack.end());
const Backtrace* backtrace =
&*backtrace_storage.insert(Backtrace(std::move(stack))).first;
AllocationEvent alloc(sample->allocator, Address(0), sample->size,
backtrace, context_id);
++counts[alloc];
......@@ -266,15 +256,8 @@ void ConnectionManager::DoDumpOneProcessForTracing(
return;
}
CHECK(tracking->backtrace_storage_lock.IsLocked());
ExportParams params;
params.allocs = std::move(counts);
auto it = tracking->vm_regions.find(pid);
if (it != tracking->vm_regions.end()) {
params.maps = std::move(it->second);
}
params.context_map = std::move(context);
params.mapped_strings = std::move(mapped_strings);
params.process_type = process_type;
......@@ -284,6 +267,10 @@ void ConnectionManager::DoDumpOneProcessForTracing(
params.next_id = next_id_;
params.sampling_rate = sampling_rate;
auto it = tracking->vm_regions.find(pid);
if (it != tracking->vm_regions.end())
params.maps = std::move(it->second);
std::ostringstream oss;
ExportMemoryMapsAndV2StackTraceToJSON(&params, oss);
std::string reply = oss.str();
......
......@@ -19,7 +19,6 @@
#include "base/timer/timer.h"
#include "build/build_config.h"
#include "components/services/heap_profiling/allocation_event.h"
#include "components/services/heap_profiling/backtrace_storage.h"
#include "components/services/heap_profiling/public/mojom/heap_profiling_service.mojom.h"
#include "services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom.h"
......@@ -47,22 +46,6 @@ class ConnectionManager {
ConnectionManager();
~ConnectionManager();
// Shared types for the dump-type-specific args structures.
struct DumpArgs {
DumpArgs();
DumpArgs(DumpArgs&&) noexcept;
~DumpArgs();
private:
friend ConnectionManager;
// This lock keeps the backtrace atoms alive throughout the dumping
// process. It will be initialized by DumpProcess.
BacktraceStorage::Lock backtrace_storage_lock;
DISALLOW_COPY_AND_ASSIGN(DumpArgs);
};
// Dumping is asynchronous so will not be complete when this function
// returns. The dump is complete when the callback provided in the args is
// fired.
......@@ -110,8 +93,6 @@ class ConnectionManager {
// Reports the ProcessTypes of the processes being profiled.
void ReportMetrics();
BacktraceStorage backtrace_storage_;
// The next ID to use when exporting a heap dump.
size_t next_id_ = 1;
......
......@@ -47,9 +47,8 @@ struct BacktraceNode {
using BacktraceTable = std::map<BacktraceNode, size_t>;
// Used as a temporary map key to uniquify an allocation with a given context
// and stack. A lock is held when dumping backtraces that guarantees that no
// Backtraces will be created or destroyed during the lifetime of that
// structure. Therefore it's safe to use a raw pointer. Since backtraces are
// and stack. No backtraces are created or destroyed during the lifetime of that
// structure, so it's safe to use a raw pointer. Since backtraces are
// uniquified, this does pointer comparisons on the backtrace to give a stable
// ordering, even if that ordering has no intrinsic meaning.
struct UniqueAlloc {
......@@ -219,9 +218,9 @@ size_t AppendBacktraceStrings(const Backtrace& backtrace,
ExportParams* params) {
int parent = -1;
// Addresses must be output in reverse order.
for (const Address& addr : base::Reversed(backtrace.addrs())) {
for (const Address addr : base::Reversed(backtrace.addrs())) {
size_t sid;
auto it = params->mapped_strings.find(addr.value);
auto it = params->mapped_strings.find(addr);
if (it != params->mapped_strings.end()) {
sid = AddOrGetString(it->second, string_table, params);
} else {
......@@ -231,10 +230,9 @@ size_t AppendBacktraceStrings(const Backtrace& backtrace,
// Adding to sizeof(kPcPrefix) yields the buffer size needed including the
// null terminator.
static constexpr int kBufSize =
sizeof(kPcPrefix) +
(std::numeric_limits<decltype(addr.value)>::digits / 4);
sizeof(kPcPrefix) + std::numeric_limits<decltype(addr)>::digits / 4;
char buf[kBufSize];
snprintf(buf, kBufSize, "%s%" PRIx64, kPcPrefix, addr.value);
snprintf(buf, kBufSize, "%s%" PRIx64, kPcPrefix, addr);
sid = AddOrGetString(buf, string_table, params);
}
parent = AddOrGetBacktraceNode(BacktraceNode(sid, parent), backtrace_table,
......@@ -245,7 +243,7 @@ size_t AppendBacktraceStrings(const Backtrace& backtrace,
// Writes the string table which looks like:
// "strings":[
// {"id":123,string:"This is the string"},
// {"id":123,"string":"This is the string"},
// ...
// ]
void WriteStrings(const StringTable& string_table, std::ostream& out) {
......
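A quick check of the buffer arithmetic in AppendBacktraceStrings above: numeric_limits<uint64_t>::digits is 64, so 64 / 4 = 16 hex characters, and sizeof(kPcPrefix) already counts the prefix's NUL terminator, which doubles as the terminator slot for snprintf. A standalone sketch; the "pc:" prefix text is an assumption here, as the real constant is defined elsewhere in json_exporter.cc:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <limits>

// Stand-in for the constant defined elsewhere in json_exporter.cc; the
// exact prefix text is an assumption.
constexpr char kPcPrefix[] = "pc:";

int main() {
  uint64_t addr = 0x1234;
  // 64 binary digits / 4 = 16 hex digits; sizeof(kPcPrefix) includes the
  // prefix's NUL, which becomes the terminator slot for snprintf.
  static constexpr int kBufSize =
      sizeof(kPcPrefix) + std::numeric_limits<decltype(addr)>::digits / 4;
  char buf[kBufSize];
  std::snprintf(buf, kBufSize, "%s%" PRIx64, kPcPrefix, addr);
  std::printf("%s (buffer size %d)\n", buf, kBufSize);  // pc:1234 (buffer size 20)
}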
......@@ -8,7 +8,6 @@
#include <iosfwd>
#include <vector>
#include "base/values.h"
#include "components/services/heap_profiling/allocation_event.h"
#include "components/services/heap_profiling/public/mojom/heap_profiling_service.mojom.h"
#include "services/resource_coordinator/public/mojom/memory_instrumentation/memory_instrumentation.mojom.h"
......
......@@ -13,7 +13,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/values.h"
#include "build/build_config.h"
#include "components/services/heap_profiling/backtrace_storage.h"
#include "components/services/heap_profiling/backtrace.h"
#include "services/resource_coordinator/public/cpp/memory_instrumentation/os_metrics.h"
#include "testing/gtest/include/gtest/gtest.h"
......@@ -128,21 +128,22 @@ bool IsBacktraceInList(const base::Value* backtraces, int id, int parent) {
return false;
}
const Backtrace* InsertBacktrace(BacktraceStorage& storage,
std::vector<Address> addrs) {
return &*storage.insert(Backtrace(std::move(addrs))).first;
}
} // namespace
TEST(ProfilingJsonExporterTest, Simple) {
BacktraceStorage backtrace_storage;
std::vector<Address> stack1;
stack1.push_back(Address(0x5678));
stack1.push_back(Address(0x1234));
const Backtrace* bt1 = backtrace_storage.Insert(std::move(stack1));
std::vector<Address> stack1{Address(0x5678), Address(0x1234)};
const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
std::vector<Address> stack2;
stack2.push_back(Address(0x9013));
stack2.push_back(Address(0x9012));
stack2.push_back(Address(0x1234));
const Backtrace* bt2 = backtrace_storage.Insert(std::move(stack2));
std::vector<Address> stack2{Address(0x9013), Address(0x9012),
Address(0x1234)};
const Backtrace* bt2 = InsertBacktrace(backtrace_storage, std::move(stack2));
AllocationEventSet events;
events.insert(
......@@ -301,9 +302,8 @@ TEST(ProfilingJsonExporterTest, Sampling) {
BacktraceStorage backtrace_storage;
std::vector<Address> stack1;
stack1.push_back(Address(0x5678));
const Backtrace* bt1 = backtrace_storage.Insert(std::move(stack1));
std::vector<Address> stack1{Address(0x5678)};
const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
AllocationEventSet events;
events.insert(AllocationEvent(AllocatorType::kMalloc, Address(0x1),
......@@ -356,17 +356,14 @@ TEST(ProfilingJsonExporterTest, Sampling) {
TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
BacktraceStorage backtrace_storage;
std::vector<Address> stack1;
stack1.push_back(Address(0x1234));
const Backtrace* bt1 = backtrace_storage.Insert(std::move(stack1));
std::vector<Address> stack1{Address(0x1234)};
const Backtrace* bt1 = InsertBacktrace(backtrace_storage, std::move(stack1));
std::vector<Address> stack2;
stack2.push_back(Address(0x5678));
const Backtrace* bt2 = backtrace_storage.Insert(std::move(stack2));
std::vector<Address> stack2{Address(0x5678)};
const Backtrace* bt2 = InsertBacktrace(backtrace_storage, std::move(stack2));
std::vector<Address> stack3;
stack3.push_back(Address(0x9999));
const Backtrace* bt3 = backtrace_storage.Insert(std::move(stack3));
std::vector<Address> stack3{Address(0x9999)};
const Backtrace* bt3 = InsertBacktrace(backtrace_storage, std::move(stack3));
AllocationEventSet events;
events.insert(
......@@ -493,9 +490,8 @@ TEST(ProfilingJsonExporterTest, Context) {
BacktraceStorage backtrace_storage;
ExportParams params;
std::vector<Address> stack;
stack.push_back(Address(0x1234));
const Backtrace* bt = backtrace_storage.Insert(std::move(stack));
std::vector<Address> stack{Address(0x1234)};
const Backtrace* bt = InsertBacktrace(backtrace_storage, std::move(stack));
std::string context_str1("Context 1");
int context_id1 = 1;
......