Commit 3c600ba3 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Teach allocator about PartitionAlloc

A core security guarantee of PartitionAlloc is that allocations of
different types are never placed at overlapping addresses. To maintain
this guarantee, the GuardedPageAllocator needs to be taught when it is
used to back PartitionAlloc, and needs the allocation type passed in on
Allocate(). In that case it maintains a PartitionAlloc-specific free
list that tracks which types have previously been used for particular
slots.

Bug: 956824
Change-Id: I77e596f29d29cc7f88b9e838b8bae891037bafcb
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1622477
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Commit-Queue: Vitaly Buka <vitalybuka@chromium.org>
Auto-Submit: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#661994}
parent 24c4e1c0
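The core idea, as a minimal standalone C++ sketch (hypothetical names; simplified from the PartitionAllocSlotFreeList in the diff below, with the random eviction replaced by popping the most recently freed slot):

#include <cstdint>
#include <map>
#include <vector>

// Sketch only: fresh slots are handed out first; once exhausted, a slot may
// be reused only for the type that previously occupied it, so objects of two
// different types are never placed in the same slot.
class TypeAwareSlotFreeList {
 public:
  void Initialize(uint16_t max_entries) {
    max_entries_ = max_entries;
    type_mapping_.resize(max_entries);
  }

  // Returns false if no slot is available for |type|, mirroring the new
  // bool-returning Allocate(T* out, const char* type) interface.
  bool Allocate(uint16_t* out, const char* type) {
    if (num_used_entries_ < max_entries_) {
      type_mapping_[num_used_entries_] = type;
      *out = num_used_entries_++;
      return true;
    }
    auto it = free_list_.find(type);
    if (it == free_list_.end() || it->second.empty())
      return false;  // No freed slot of this type: fail the allocation.
    *out = it->second.back();  // The real code evicts a random entry instead.
    it->second.pop_back();
    return true;
  }

  void Free(uint16_t entry) {
    // Return the slot to the free list keyed by the type it was used for.
    free_list_[type_mapping_[entry]].push_back(entry);
  }

 private:
  std::vector<const char*> type_mapping_;  // slot index -> type used for it
  // Keyed by pointer identity; assumes type names are stable string literals.
  std::map<const char*, std::vector<uint16_t>> free_list_;
  uint16_t num_used_entries_ = 0;
  uint16_t max_entries_ = 0;
};

Keying the map by const char* relies on pointer identity, which works because PartitionAlloc type names are stable string literals; the actual change below does the same.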
@@ -65,12 +65,16 @@ void GuardedPageAllocator::SimpleFreeList<T>::Initialize(T max_entries) {
 }
 
 template <typename T>
-T GuardedPageAllocator::SimpleFreeList<T>::Allocate() {
-  if (num_used_entries_ < max_entries_)
-    return num_used_entries_++;
+bool GuardedPageAllocator::SimpleFreeList<T>::Allocate(T* out,
+                                                       const char* type) {
+  if (num_used_entries_ < max_entries_) {
+    *out = num_used_entries_++;
+    return true;
+  }
 
   DCHECK_LE(free_list_.size(), max_entries_);
-  return RandomEviction(&free_list_);
+  *out = RandomEviction(&free_list_);
+  return true;
 }
 
 template <typename T>
@@ -79,12 +83,47 @@ void GuardedPageAllocator::SimpleFreeList<T>::Free(T entry) {
   free_list_.push_back(entry);
 }
 
+GuardedPageAllocator::PartitionAllocSlotFreeList::PartitionAllocSlotFreeList() =
+    default;
+GuardedPageAllocator::PartitionAllocSlotFreeList::
+    ~PartitionAllocSlotFreeList() = default;
+
+void GuardedPageAllocator::PartitionAllocSlotFreeList::Initialize(
+    AllocatorState::SlotIdx max_entries) {
+  max_entries_ = max_entries;
+  type_mapping_.reserve(max_entries);
+}
+
+bool GuardedPageAllocator::PartitionAllocSlotFreeList::Allocate(
+    AllocatorState::SlotIdx* out,
+    const char* type) {
+  if (num_used_entries_ < max_entries_) {
+    type_mapping_[num_used_entries_] = type;
+    *out = num_used_entries_++;
+    return true;
+  }
+
+  if (!free_list_.count(type) || free_list_[type].empty())
+    return false;
+
+  DCHECK_LE(free_list_[type].size(), max_entries_);
+  *out = RandomEviction(&free_list_[type]);
+  return true;
+}
+
+void GuardedPageAllocator::PartitionAllocSlotFreeList::Free(
+    AllocatorState::SlotIdx entry) {
+  DCHECK_LT(entry, num_used_entries_);
+  free_list_[type_mapping_[entry]].push_back(entry);
+}
+
 GuardedPageAllocator::GuardedPageAllocator() {}
 
 void GuardedPageAllocator::Init(size_t max_alloced_pages,
                                 size_t num_metadata,
                                 size_t total_pages,
-                                OutOfMemoryCallback oom_callback) {
+                                OutOfMemoryCallback oom_callback,
+                                bool is_partition_alloc) {
   CHECK_GT(max_alloced_pages, 0U);
   CHECK_LE(max_alloced_pages, num_metadata);
   CHECK_LE(num_metadata, AllocatorState::kMaxMetadata);
@@ -94,6 +133,7 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages,
   state_.num_metadata = num_metadata;
   state_.total_pages = total_pages;
   oom_callback_ = std::move(oom_callback);
+  is_partition_alloc_ = is_partition_alloc;
 
   state_.page_size = base::GetPageSize();
@@ -110,7 +150,11 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages,
     // there should be no risk of a race here.
     base::AutoLock lock(lock_);
     free_metadata_.Initialize(num_metadata);
-    free_slots_.Initialize(total_pages);
+    if (is_partition_alloc_)
+      free_slots_ = std::make_unique<PartitionAllocSlotFreeList>();
+    else
+      free_slots_ = std::make_unique<SimpleFreeList<AllocatorState::SlotIdx>>();
+    free_slots_->Initialize(total_pages);
   }
 
   slot_to_metadata_idx_.resize(total_pages);
@@ -129,7 +173,12 @@ GuardedPageAllocator::~GuardedPageAllocator() {
   UnmapRegion();
 }
 
-void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
+void* GuardedPageAllocator::Allocate(size_t size,
+                                     size_t align,
+                                     const char* type) {
+  if (!is_partition_alloc_)
+    DCHECK_EQ(type, nullptr);
+
   if (!size || size > state_.page_size || align > state_.page_size)
     return nullptr;
@@ -143,7 +192,7 @@ void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
 
   AllocatorState::SlotIdx free_slot;
   AllocatorState::MetadataIdx free_metadata;
-  if (!ReserveSlotAndMetadata(&free_slot, &free_metadata))
+  if (!ReserveSlotAndMetadata(&free_slot, &free_metadata, type))
     return nullptr;
 
   uintptr_t free_page = state_.SlotToAddr(free_slot);
@@ -232,11 +281,13 @@ size_t GuardedPageAllocator::RegionSize() const {
 
 bool GuardedPageAllocator::ReserveSlotAndMetadata(
     AllocatorState::SlotIdx* slot,
-    AllocatorState::MetadataIdx* metadata_idx) {
+    AllocatorState::MetadataIdx* metadata_idx,
+    const char* type) {
   base::AutoLock lock(lock_);
-  if (num_alloced_pages_ == max_alloced_pages_) {
-    if (++consecutive_failed_allocations_ == kOutOfMemoryCount) {
-      if (!oom_hit_) {
+  if (num_alloced_pages_ == max_alloced_pages_ ||
+      !free_slots_->Allocate(slot, type)) {
+    if (!oom_hit_) {
+      if (++consecutive_failed_allocations_ == kOutOfMemoryCount) {
         oom_hit_ = true;
         base::AutoUnlock unlock(lock_);
         std::move(oom_callback_).Run(total_allocations_ - kOutOfMemoryCount);
@@ -245,8 +296,7 @@ bool GuardedPageAllocator::ReserveSlotAndMetadata(
     return false;
   }
 
-  *slot = free_slots_.Allocate();
-  *metadata_idx = free_metadata_.Allocate();
+  CHECK(free_metadata_.Allocate(metadata_idx, nullptr));
   if (metadata_[*metadata_idx].alloc_ptr) {
     // Overwrite the outdated slot_to_metadata_idx mapping from the previous use
     // of this metadata if it's still valid.
@@ -269,7 +319,7 @@ void GuardedPageAllocator::FreeSlotAndMetadata(
   DCHECK_LT(metadata_idx, state_.num_metadata);
 
   base::AutoLock lock(lock_);
-  free_slots_.Free(slot);
+  free_slots_->Free(slot);
   free_metadata_.Free(metadata_idx);
 
   DCHECK_GT(num_alloced_pages_, 0U);
@@ -6,6 +6,7 @@
 #define COMPONENTS_GWP_ASAN_CLIENT_GUARDED_PAGE_ALLOCATOR_H_
 
 #include <atomic>
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
@@ -55,7 +56,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   void Init(size_t max_alloced_pages,
             size_t num_metadata,
             size_t total_pages,
-            OutOfMemoryCallback oom_callback);
+            OutOfMemoryCallback oom_callback,
+            bool is_partition_alloc);
 
   // On success, returns a pointer to size bytes of page-guarded memory. On
   // failure, returns nullptr. The allocation is not guaranteed to be
@@ -66,8 +68,10 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   // It must be less than or equal to the allocation size. If it's left as zero
   // it will default to the default alignment the allocator chooses.
   //
+  // The type parameter should only be set for PartitionAlloc allocations.
+  //
   // Preconditions: Init() must have been called.
-  void* Allocate(size_t size, size_t align = 0);
+  void* Allocate(size_t size, size_t align = 0, const char* type = nullptr);
 
   // Deallocates memory pointed to by ptr. ptr must have been previously
   // returned by a call to Allocate.
@@ -87,17 +91,29 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   }
 
  private:
+  // Virtual base class representing a free list of entries T.
+  template <typename T>
+  class FreeList {
+   public:
+    FreeList() = default;
+    virtual ~FreeList() = default;
+    virtual void Initialize(T max_entries) = 0;
+    virtual bool Allocate(T* out, const char* type) = 0;
+    virtual void Free(T entry) = 0;
+  };
+
   // Manages a free list of slot or metadata indices in the range
   // [0, max_entries). Access to SimpleFreeList objects must be synchronized.
   //
   // SimpleFreeList is specifically designed to pre-allocate data in Initialize
   // so that it never recurses into malloc/free during Allocate/Free.
   template <typename T>
-  class SimpleFreeList {
+  class SimpleFreeList : public FreeList<T> {
    public:
-    void Initialize(T max_entries);
-    T Allocate();
-    void Free(T entry);
+    ~SimpleFreeList() final = default;
+    void Initialize(T max_entries) final;
+    bool Allocate(T* out, const char* type) final;
+    void Free(T entry) final;
 
    private:
     std::vector<T> free_list_;
@@ -108,6 +124,36 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
     T max_entries_ = 0;
   };
 
+  // Manages a free list of slot indices especially for PartitionAlloc.
+  // Allocate() is type-aware so that once a page has been used to allocate
+  // a given partition, it never reallocates an object of a different type on
+  // that page. Access to this object must be synchronized.
+  //
+  // PartitionAllocSlotFreeList can perform malloc/free during Allocate/Free,
+  // so it is not safe to use with malloc hooks!
+  //
+  // TODO(vtsyrklevich): Right now we allocate slots to partitions on a
+  // first-come first-serve basis, this makes it likely that all slots will be
+  // used up by common types first. Set aside a fixed amount of slots (~5%) for
+  // one-off partitions so that we make sure to sample rare types as well.
+  class PartitionAllocSlotFreeList : public FreeList<AllocatorState::SlotIdx> {
+   public:
+    PartitionAllocSlotFreeList();
+    ~PartitionAllocSlotFreeList() final;
+    void Initialize(AllocatorState::SlotIdx max_entries) final;
+    bool Allocate(AllocatorState::SlotIdx* out, const char* type) final;
+    void Free(AllocatorState::SlotIdx entry) final;
+
+   private:
+    std::vector<const char*> type_mapping_;
+    std::map<const char*, std::vector<AllocatorState::SlotIdx>> free_list_;
+
+    // Number of used entries. This counter ensures all free entries are used
+    // before starting to use random eviction.
+    AllocatorState::SlotIdx num_used_entries_ = 0;
+    AllocatorState::SlotIdx max_entries_ = 0;
+  };
+
   // Unmaps memory allocated by this class, if Init was called.
   ~GuardedPageAllocator();
@@ -128,8 +174,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   // On success, returns true and writes the reserved indices to |slot| and
   // |metadata_idx|. Otherwise returns false if no allocations are available.
   bool ReserveSlotAndMetadata(AllocatorState::SlotIdx* slot,
-                              AllocatorState::MetadataIdx* metadata_idx)
-      LOCKS_EXCLUDED(lock_);
+                              AllocatorState::MetadataIdx* metadata_idx,
+                              const char* type) LOCKS_EXCLUDED(lock_);
 
   // Marks the specified slot and metadata as unreserved.
   void FreeSlotAndMetadata(AllocatorState::SlotIdx slot,
@@ -151,7 +197,8 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   // Lock that synchronizes allocating/freeing slots between threads.
   base::Lock lock_;
 
-  SimpleFreeList<AllocatorState::SlotIdx> free_slots_ GUARDED_BY(lock_);
+  std::unique_ptr<FreeList<AllocatorState::SlotIdx>> free_slots_
+      GUARDED_BY(lock_);
   SimpleFreeList<AllocatorState::MetadataIdx> free_metadata_ GUARDED_BY(lock_);
 
   // Number of currently-allocated pages.
@@ -174,11 +221,13 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   bool oom_hit_ GUARDED_BY(lock_) = false;
   OutOfMemoryCallback oom_callback_;
 
+  bool is_partition_alloc_ = false;
+
   // Required for a singleton to access the constructor.
   friend base::NoDestructor<GuardedPageAllocator>;
 
+  friend class BaseGpaTest;
   friend class CrashAnalyzerTest;
-  friend class GuardedPageAllocatorTest;
   FRIEND_TEST_ALL_PREFIXES(CrashAnalyzerTest, InternalError);
   FRIEND_TEST_ALL_PREFIXES(CrashAnalyzerTest, StackTraceCollection);
   FRIEND_TEST_ALL_PREFIXES(GuardedPageAllocatorTest,
@@ -4,6 +4,7 @@
 #include "components/gwp_asan/client/guarded_page_allocator.h"
 
 #include <algorithm>
 #include <array>
+#include <set>
 #include <utility>
@@ -23,14 +24,24 @@ namespace internal {
 static constexpr size_t kMaxMetadata = AllocatorState::kMaxMetadata;
 static constexpr size_t kMaxSlots = AllocatorState::kMaxSlots;
 
-class GuardedPageAllocatorTest : public testing::Test {
+class BaseGpaTest : public testing::Test {
  protected:
-  explicit GuardedPageAllocatorTest(size_t max_allocated_pages = kMaxMetadata) {
+  BaseGpaTest(size_t max_allocated_pages, bool is_partition_alloc) {
     gpa_.Init(max_allocated_pages, kMaxMetadata, kMaxSlots,
               base::BindLambdaForTesting(
-                  [&](size_t allocations) { allocator_oom_ = true; }));
+                  [&](size_t allocations) { allocator_oom_ = true; }),
+              is_partition_alloc);
   }
 
+  GuardedPageAllocator gpa_;
+  bool allocator_oom_ = false;
+};
+
+class GuardedPageAllocatorTest : public BaseGpaTest,
+                                 public testing::WithParamInterface<bool> {
+ protected:
+  GuardedPageAllocatorTest() : BaseGpaTest(kMaxMetadata, GetParam()) {}
+
   // Get a left- or right- aligned allocation (or nullptr on error.)
   char* GetAlignedAllocation(bool left_aligned, size_t sz, size_t align = 0) {
     for (size_t i = 0; i < 100; i++) {
@@ -61,12 +72,13 @@ class GuardedPageAllocatorTest : public testing::Test {
     return reinterpret_cast<uintptr_t>(buf) & page_mask;
   }
-
-  GuardedPageAllocator gpa_;
-  bool allocator_oom_ = false;
 };
 
-TEST_F(GuardedPageAllocatorTest, SingleAllocDealloc) {
+INSTANTIATE_TEST_SUITE_P(VaryPartitionAlloc,
+                         GuardedPageAllocatorTest,
+                         testing::Values(false, true));
+
+TEST_P(GuardedPageAllocatorTest, SingleAllocDealloc) {
   char* buf = reinterpret_cast<char*>(gpa_.Allocate(base::GetPageSize()));
   EXPECT_NE(buf, nullptr);
   EXPECT_TRUE(gpa_.PointerIsMine(buf));
@@ -77,14 +89,14 @@ TEST_F(GuardedPageAllocatorTest, SingleAllocDealloc) {
   EXPECT_DEATH(gpa_.Deallocate(buf), "");
 }
 
-TEST_F(GuardedPageAllocatorTest, CrashOnBadDeallocPointer) {
+TEST_P(GuardedPageAllocatorTest, CrashOnBadDeallocPointer) {
   EXPECT_DEATH(gpa_.Deallocate(nullptr), "");
   char* buf = reinterpret_cast<char*>(gpa_.Allocate(8));
   EXPECT_DEATH(gpa_.Deallocate(buf + 1), "");
   gpa_.Deallocate(buf);
 }
 
-TEST_F(GuardedPageAllocatorTest, PointerIsMine) {
+TEST_P(GuardedPageAllocatorTest, PointerIsMine) {
   void* buf = gpa_.Allocate(1);
   auto malloc_ptr = std::make_unique<char>();
   EXPECT_TRUE(gpa_.PointerIsMine(buf));
@@ -95,7 +107,7 @@ TEST_F(GuardedPageAllocatorTest, PointerIsMine) {
   EXPECT_FALSE(gpa_.PointerIsMine(malloc_ptr.get()));
 }
 
-TEST_F(GuardedPageAllocatorTest, GetRequestedSize) {
+TEST_P(GuardedPageAllocatorTest, GetRequestedSize) {
   void* buf = gpa_.Allocate(100);
   EXPECT_EQ(gpa_.GetRequestedSize(buf), 100U);
 #if !defined(OS_MACOSX)
@@ -105,7 +117,7 @@ TEST_F(GuardedPageAllocatorTest, GetRequestedSize) {
 #endif
 }
 
-TEST_F(GuardedPageAllocatorTest, LeftAlignedAllocation) {
+TEST_P(GuardedPageAllocatorTest, LeftAlignedAllocation) {
   char* buf = GetAlignedAllocation(true, 16);
   ASSERT_NE(buf, nullptr);
   EXPECT_DEATH(buf[-1] = 'A', "");
@@ -114,7 +126,7 @@ TEST_F(GuardedPageAllocatorTest, LeftAlignedAllocation) {
   gpa_.Deallocate(buf);
 }
 
-TEST_F(GuardedPageAllocatorTest, RightAlignedAllocation) {
+TEST_P(GuardedPageAllocatorTest, RightAlignedAllocation) {
   char* buf =
       GetAlignedAllocation(false, GuardedPageAllocator::kGpaAllocAlignment);
   ASSERT_NE(buf, nullptr);
@@ -124,7 +136,7 @@ TEST_F(GuardedPageAllocatorTest, RightAlignedAllocation) {
   gpa_.Deallocate(buf);
 }
 
-TEST_F(GuardedPageAllocatorTest, AllocationAlignment) {
+TEST_P(GuardedPageAllocatorTest, AllocationAlignment) {
   const uintptr_t page_size = base::GetPageSize();
 
   EXPECT_EQ(GetRightAlignedAllocationOffset(9, 1), page_size - 9);
@@ -144,7 +156,7 @@ TEST_F(GuardedPageAllocatorTest, AllocationAlignment) {
   EXPECT_EQ(GetAlignedAllocation(false, 5, page_size * 2), nullptr);
 }
 
-TEST_F(GuardedPageAllocatorTest, OutOfMemoryCallback) {
+TEST_P(GuardedPageAllocatorTest, OutOfMemoryCallback) {
   for (size_t i = 0; i < kMaxMetadata; i++)
     EXPECT_NE(gpa_.Allocate(1), nullptr);
@@ -156,10 +168,10 @@ TEST_F(GuardedPageAllocatorTest, OutOfMemoryCallback) {
 }
 
 class GuardedPageAllocatorParamTest
-    : public GuardedPageAllocatorTest,
+    : public BaseGpaTest,
       public testing::WithParamInterface<size_t> {
  protected:
-  GuardedPageAllocatorParamTest() : GuardedPageAllocatorTest(GetParam()) {}
+  GuardedPageAllocatorParamTest() : BaseGpaTest(GetParam(), false) {}
 };
 
 TEST_P(GuardedPageAllocatorParamTest, AllocDeallocAllPages) {
@@ -196,9 +208,8 @@ INSTANTIATE_TEST_SUITE_P(VaryNumPages,
 
 class ThreadedAllocCountDelegate : public base::DelegateSimpleThread::Delegate {
  public:
-  explicit ThreadedAllocCountDelegate(
-      GuardedPageAllocator* gpa,
-      std::array<void*, kMaxMetadata>* allocations)
+  ThreadedAllocCountDelegate(GuardedPageAllocator* gpa,
+                             std::array<void*, kMaxMetadata>* allocations)
       : gpa_(gpa), allocations_(allocations) {}
 
   void Run() override {
@@ -216,7 +227,7 @@ class ThreadedAllocCountDelegate : public base::DelegateSimpleThread::Delegate {
 
 // Test that no pages are double-allocated or left unallocated, and that no
 // extra pages are allocated when there's concurrent calls to Allocate().
-TEST_F(GuardedPageAllocatorTest, ThreadedAllocCount) {
+TEST_P(GuardedPageAllocatorTest, ThreadedAllocCount) {
   constexpr size_t num_threads = 2;
   std::array<void*, kMaxMetadata> allocations[num_threads];
   {
@@ -277,7 +288,7 @@ class ThreadedHighContentionDelegate
 
 // Test that allocator remains in consistent state under high contention and
 // doesn't double-allocate pages or fail to deallocate pages.
-TEST_F(GuardedPageAllocatorTest, ThreadedHighContention) {
+TEST_P(GuardedPageAllocatorTest, ThreadedHighContention) {
   constexpr size_t num_threads = 1000;
   {
     base::DelegateSimpleThreadPool threads("page_writers", num_threads);
@@ -298,5 +309,36 @@ TEST_F(GuardedPageAllocatorTest, ThreadedHighContention) {
   EXPECT_NE(gpa_.Allocate(1), nullptr);
 }
 
+class GuardedPageAllocatorPartitionAllocTest : public BaseGpaTest {
+ protected:
+  GuardedPageAllocatorPartitionAllocTest() : BaseGpaTest(kMaxMetadata, true) {}
+};
+
+TEST_F(GuardedPageAllocatorPartitionAllocTest,
+       DifferentPartitionsNeverOverlap) {
+  constexpr const char* kType1 = "fake type1";
+  constexpr const char* kType2 = "fake type2";
+
+  std::set<void*> type1, type2;
+  for (size_t i = 0; i < kMaxSlots * 3; i++) {
+    void* alloc1 = gpa_.Allocate(1, 0, kType1);
+    ASSERT_NE(alloc1, nullptr);
+    void* alloc2 = gpa_.Allocate(1, 0, kType2);
+    ASSERT_NE(alloc2, nullptr);
+
+    type1.insert(alloc1);
+    type2.insert(alloc2);
+
+    gpa_.Deallocate(alloc1);
+    gpa_.Deallocate(alloc2);
+  }
+
+  std::vector<void*> intersection;
+  std::set_intersection(type1.begin(), type1.end(), type2.begin(), type2.end(),
+                        std::back_inserter(intersection));
+
+  EXPECT_EQ(intersection.size(), 0u);
+}
+
 }  // namespace internal
 }  // namespace gwp_asan
@@ -255,7 +255,8 @@ void InstallMallocHooks(size_t max_allocated_pages,
                         size_t sampling_frequency) {
   static crash_reporter::CrashKeyString<24> malloc_crash_key(kMallocCrashKey);
   gpa = new GuardedPageAllocator();
-  gpa->Init(max_allocated_pages, num_metadata, total_pages, base::DoNothing());
+  gpa->Init(max_allocated_pages, num_metadata, total_pages, base::DoNothing(),
+            false);
   malloc_crash_key.Set(gpa->GetCrashKey());
   sampling_state.Init(sampling_frequency);
   base::allocator::InsertAllocatorDispatch(&g_allocator_dispatch);
@@ -28,13 +28,6 @@ SamplingState<PARTITIONALLOC> sampling_state;
 // for every access.
 GuardedPageAllocator* gpa = nullptr;
 
-// TODO(vtsyrklevich): PartitionAlloc ensures that different typed allocations
-// never overlap. For now, we ensure this property by only allowing one
-// allocation for every page. In the future we need to teach the allocator about
-// types so that it keeps track and pages can be reused.
-size_t allocation_counter = 0;
-size_t total_allocations = 0;
-
 bool AllocationHook(void** out, int flags, size_t size, const char* type_name) {
   if (UNLIKELY(sampling_state.Sample())) {
     // Ignore allocation requests with unknown flags.
@@ -43,12 +36,7 @@ bool AllocationHook(void** out, int flags, size_t size, const char* type_name) {
     if (flags & ~kKnownFlags)
       return false;
 
-    // Ensure PartitionAlloc types are separated for now.
-    if (allocation_counter >= total_allocations)
-      return false;
-    allocation_counter++;
-
-    if (void* allocation = gpa->Allocate(size)) {
+    if (void* allocation = gpa->Allocate(size, 0, type_name)) {
       *out = allocation;
       return true;
     }
@@ -86,10 +74,10 @@ void InstallPartitionAllocHooks(size_t max_allocated_pages,
   static crash_reporter::CrashKeyString<24> pa_crash_key(
       kPartitionAllocCrashKey);
   gpa = new GuardedPageAllocator();
-  gpa->Init(max_allocated_pages, num_metadata, total_pages, base::DoNothing());
+  gpa->Init(max_allocated_pages, num_metadata, total_pages, base::DoNothing(),
+            true);
   pa_crash_key.Set(gpa->GetCrashKey());
   sampling_state.Init(sampling_frequency);
-  total_allocations = total_pages;
 
   // TODO(vtsyrklevich): Allow SetOverrideHooks to be passed in so we can hook
   // PDFium's PartitionAlloc fork.
   base::PartitionAllocHooks::SetOverrideHooks(&AllocationHook, &FreeHook,
@@ -44,7 +44,7 @@ constexpr const char* kPartitionAllocHistogramName =
 class CrashAnalyzerTest : public testing::Test {
  protected:
   void SetUp() final {
-    gpa_.Init(1, 1, 1, base::DoNothing());
+    gpa_.Init(1, 1, 1, base::DoNothing(), false);
     InitializeSnapshot();
   }
@@ -78,7 +78,7 @@ MULTIPROCESS_TEST_MAIN(CrashpadHandler) {
 MULTIPROCESS_TEST_MAIN(CrashingProcess) {
   base::NoDestructor<GuardedPageAllocator> gpa;
   gpa->Init(AllocatorState::kMaxMetadata, AllocatorState::kMaxMetadata,
-            kTotalPages, base::DoNothing());
+            kTotalPages, base::DoNothing(), false);
 
   base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
   base::FilePath directory = cmd_line->GetSwitchValuePath("directory");