Commit 3f999d3d authored by bcwhite's avatar bcwhite Committed by Commit bot

Improved support for objects inside persistent memory.

Some compilers, including MSVC, don't consider std::atomic as a POD-
compatible type which makes it impossible to include them in persistent
memory when using the object interface -- something which is obviously
desirable.

New "object" management embeds the type directly in the class/struct
definition and runs the default constructor of the type after allocation,
thus making the allocator compatible with a much greater range of types.

BUG=546019

Review-Url: https://codereview.chromium.org/2578323002
Cr-Commit-Position: refs/heads/master@{#442884}
parent 33819cef
......@@ -1022,7 +1022,7 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
// TODO(bcwhite): Review this after major compiler releases.
DCHECK(mem_reference);
void* mem_base;
#if !defined(OS_WIN) && !defined(OS_ANDROID)
#if 0 // TODO(bcwhite): Update this for new GetAsObject functionality.
mem_base = allocator_->GetAsObject<ThreadActivityTracker::Header>(
mem_reference, kTypeIdActivityTracker);
#else
......
......@@ -28,12 +28,12 @@ FeatureList* g_instance = nullptr;
// Tracks whether the FeatureList instance was initialized via an accessor.
bool g_initialized_from_accessor = false;
const uint32_t kFeatureType = 0x06567CA6 + 1; // SHA1(FeatureEntry) v1
// An allocator entry for a feature in shared memory. The FeatureEntry is
// followed by a base::Pickle object that contains the feature and trial name.
// Any changes to this structure requires a bump in kFeatureType defined above.
struct FeatureEntry {
// SHA1(FeatureEntry): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 8;
......@@ -98,13 +98,8 @@ void FeatureList::InitializeFromSharedMemory(
DCHECK(!initialized_);
PersistentMemoryAllocator::Iterator iter(allocator);
PersistentMemoryAllocator::Reference ref;
while ((ref = iter.GetNextOfType(kFeatureType)) !=
PersistentMemoryAllocator::kReferenceNull) {
const FeatureEntry* entry =
allocator->GetAsObject<const FeatureEntry>(ref, kFeatureType);
const FeatureEntry* entry;
while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
OverrideState override_state =
static_cast<OverrideState>(entry->override_state);
......@@ -170,20 +165,17 @@ void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
pickle.WriteString(override.second.field_trial->trial_name());
size_t total_size = sizeof(FeatureEntry) + pickle.size();
PersistentMemoryAllocator::Reference ref =
allocator->Allocate(total_size, kFeatureType);
if (!ref)
FeatureEntry* entry = allocator->AllocateObject<FeatureEntry>(total_size);
if (!entry)
return;
FeatureEntry* entry =
allocator->GetAsObject<FeatureEntry>(ref, kFeatureType);
entry->override_state = override.second.overridden_state;
entry->pickle_size = pickle.size();
char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
memcpy(dst, pickle.data(), pickle.size());
allocator->MakeIterable(ref);
allocator->MakeIterable(entry);
}
}
......
......@@ -55,7 +55,6 @@ const bool kUseSharedMemoryForFieldTrials = true;
// Constants for the field trial allocator.
const char kAllocatorName[] = "FieldTrialAllocator";
const uint32_t kFieldTrialType = 0xABA17E13 + 2; // SHA1(FieldTrialEntry) v2
// We allocate 128 KiB to hold all the field trial data. This should be enough,
// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
......@@ -717,13 +716,9 @@ void FieldTrialList::GetInitiallyActiveFieldTrials(
FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
FieldTrialAllocator::Iterator mem_iter(allocator);
FieldTrial::FieldTrialRef ref;
while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) !=
SharedPersistentMemoryAllocator::kReferenceNull) {
const FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
const FieldTrial::FieldTrialEntry* entry;
while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
nullptr) {
StringPiece trial_name;
StringPiece group_name;
if (subtle::NoBarrier_Load(&entry->activated) &&
......@@ -1042,9 +1037,8 @@ bool FieldTrialList::GetParamsFromSharedMemory(
return false;
const FieldTrial::FieldTrialEntry* entry =
global_->field_trial_allocator_
->GetAsObject<const FieldTrial::FieldTrialEntry>(field_trial->ref_,
kFieldTrialType);
global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
field_trial->ref_);
size_t allocated_size =
global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
......@@ -1075,12 +1069,11 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
std::vector<FieldTrial::FieldTrialRef> new_refs;
FieldTrial::FieldTrialRef prev_ref;
while ((prev_ref = mem_iter.GetNextOfType(kFieldTrialType)) !=
while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
FieldTrialAllocator::kReferenceNull) {
// Get the existing field trial entry in shared memory.
const FieldTrial::FieldTrialEntry* prev_entry =
allocator->GetAsObject<const FieldTrial::FieldTrialEntry>(
prev_ref, kFieldTrialType);
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
StringPiece trial_name;
StringPiece group_name;
if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
......@@ -1091,11 +1084,8 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
pickle.WriteString(trial_name);
pickle.WriteString(group_name);
size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
FieldTrial::FieldTrialRef new_ref =
allocator->Allocate(total_size, kFieldTrialType);
FieldTrial::FieldTrialEntry* new_entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(new_ref,
kFieldTrialType);
allocator->AllocateObject<FieldTrial::FieldTrialEntry>(total_size);
subtle::NoBarrier_Store(&new_entry->activated,
subtle::NoBarrier_Load(&prev_entry->activated));
new_entry->pickle_size = pickle.size();
......@@ -1108,12 +1098,14 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
// Update the ref on the field trial and add it to the list to be made
// iterable.
FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
trial->ref_ = new_ref;
new_refs.push_back(new_ref);
// Mark the existing entry as unused.
allocator->ChangeType(prev_ref, 0, kFieldTrialType);
allocator->ChangeType(prev_ref, 0,
FieldTrial::FieldTrialEntry::kPersistentTypeId);
}
for (const auto& ref : new_refs) {
......@@ -1137,13 +1129,10 @@ std::vector<const FieldTrial::FieldTrialEntry*>
FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
PersistentMemoryAllocator const& allocator) {
std::vector<const FieldTrial::FieldTrialEntry*> entries;
FieldTrial::FieldTrialRef ref;
FieldTrialAllocator::Iterator iter(&allocator);
while ((ref = iter.GetNextOfType(kFieldTrialType)) !=
FieldTrialAllocator::kReferenceNull) {
const FieldTrial::FieldTrialEntry* entry =
allocator.GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
const FieldTrial::FieldTrialEntry* entry;
while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
nullptr) {
entries.push_back(entry);
}
return entries;
......@@ -1182,13 +1171,9 @@ bool FieldTrialList::CreateTrialsFromSharedMemory(
FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
FieldTrialAllocator::Iterator mem_iter(shalloc);
FieldTrial::FieldTrialRef ref;
while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) !=
FieldTrialAllocator::kReferenceNull) {
const FieldTrial::FieldTrialEntry* entry =
shalloc->GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
const FieldTrial::FieldTrialEntry* entry;
while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
nullptr) {
StringPiece trial_name;
StringPiece group_name;
if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
......@@ -1199,7 +1184,7 @@ bool FieldTrialList::CreateTrialsFromSharedMemory(
FieldTrial* trial =
CreateFieldTrial(trial_name.as_string(), group_name.as_string());
trial->ref_ = ref;
trial->ref_ = mem_iter.GetAsReference(entry);
if (subtle::NoBarrier_Load(&entry->activated)) {
// Call |group()| to mark the trial as "used" and notify observers, if
// any. This is useful to ensure that field trials created in child
......@@ -1286,15 +1271,15 @@ void FieldTrialList::AddToAllocatorWhileLocked(
}
size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
FieldTrial::FieldTrialRef ref =
allocator->Allocate(total_size, kFieldTrialType);
FieldTrial::FieldTrialRef ref = allocator->Allocate(
total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
if (ref == FieldTrialAllocator::kReferenceNull) {
NOTREACHED();
return;
}
FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref, kFieldTrialType);
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
entry->pickle_size = pickle.size();
......@@ -1328,8 +1313,7 @@ void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
// the only thing that happens on a stale read here is a slight performance
// hit from the child re-synchronizing activation state.
FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref,
kFieldTrialType);
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
subtle::NoBarrier_Store(&entry->activated, 1);
}
}
......
......@@ -137,9 +137,11 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// We create one FieldTrialEntry per field trial in shared memory, via
// AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
// base::Pickle object that we unpickle and read from. Any changes to this
// structure requires a bump in kFieldTrialType id defined in the .cc file.
// base::Pickle object that we unpickle and read from.
struct BASE_EXPORT FieldTrialEntry {
// SHA1(FieldTrialEntry): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 8;
......
......@@ -35,11 +35,8 @@ const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
kTypeIdHistogram = 0xF1645910 + 3, // SHA1(Histogram) v3
kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
kTypeIdHistogramUnderConstruction = ~kTypeIdHistogram,
};
// The current globally-active persistent allocator for all new histograms.
......@@ -226,6 +223,9 @@ PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
// SHA1(Histogram): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize =
40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
......@@ -254,7 +254,7 @@ PersistentHistogramAllocator::Iterator::Iterator(
std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
PersistentMemoryAllocator::Reference ref;
while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
if (ref != ignore)
return allocator_->GetHistogram(ref);
}
......@@ -277,8 +277,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// add it to the local list of known histograms (while these may be simple
// references to histograms in other processes).
PersistentHistogramData* histogram_data =
memory_allocator_->GetAsObject<PersistentHistogramData>(
ref, kTypeIdHistogram);
memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
size_t length = memory_allocator_->GetAllocSize(ref);
// Check that metadata is reasonable: name is NUL terminated and non-empty,
......@@ -319,13 +318,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// during the datafill doesn't leave a bad record around that could cause
// confusion by another process trying to read it. It will be corrected
// once histogram construction is complete.
PersistentMemoryAllocator::Reference histogram_ref =
memory_allocator_->Allocate(
offsetof(PersistentHistogramData, name) + name.length() + 1,
kTypeIdHistogramUnderConstruction);
PersistentHistogramData* histogram_data =
memory_allocator_->GetAsObject<PersistentHistogramData>(
histogram_ref, kTypeIdHistogramUnderConstruction);
memory_allocator_->AllocateObject<PersistentHistogramData>(
offsetof(PersistentHistogramData, name) + name.length() + 1);
if (histogram_data) {
memcpy(histogram_data->name, name.c_str(), name.size() + 1);
histogram_data->histogram_type = histogram_type;
......@@ -384,9 +379,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
DCHECK(histogram);
DCHECK_NE(0U, histogram_data->samples_metadata.id);
DCHECK_NE(0U, histogram_data->logged_metadata.id);
memory_allocator_->ChangeType(histogram_ref, kTypeIdHistogram,
kTypeIdHistogramUnderConstruction);
PersistentMemoryAllocator::Reference histogram_ref =
memory_allocator_->GetAsReference(histogram_data);
if (ref_ptr != nullptr)
*ref_ptr = histogram_ref;
......@@ -415,15 +410,19 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
bool registered) {
// If the created persistent histogram was registered then it needs to
// be marked as "iterable" in order to be found by other processes.
if (registered)
if (registered) {
// If the created persistent histogram was registered then it needs to
// be marked as "iterable" in order to be found by other processes. This
// happens only after the histogram is fully formed so it's impossible for
// code iterating through the allocator to read a partially created record.
memory_allocator_->MakeIterable(ref);
// If it wasn't registered then a race condition must have caused
// two to be created. The allocator does not support releasing the
// acquired memory so just change the type to be empty.
else
memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
} else {
// If it wasn't registered then a race condition must have caused two to
// be created. The allocator does not support releasing the acquired memory
// so just change the type to be empty.
memory_allocator_->ChangeType(ref, 0,
PersistentHistogramData::kPersistentTypeId);
}
}
void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
......@@ -842,13 +841,9 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// Recorder forget about the histograms contained therein; otherwise,
// some operations will try to access them and the released memory.
PersistentMemoryAllocator::Iterator iter(memory_allocator);
PersistentMemoryAllocator::Reference ref;
while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
PersistentHistogramData* histogram_data =
memory_allocator->GetAsObject<PersistentHistogramData>(
ref, kTypeIdHistogram);
DCHECK(histogram_data);
StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
const PersistentHistogramData* data;
while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
StatisticsRecorder::ForgetHistogramForTesting(data->name);
// If a test breaks here then a memory region containing a histogram
// actively used by this code is being released back to the test.
......@@ -857,7 +852,7 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// the method GetCreateHistogramResultHistogram() *before* setting
// the (temporary) memory allocator via SetGlobalAllocator() so that
// histogram is instead allocated from the process heap.
DCHECK_NE(kResultHistogram, histogram_data->name);
DCHECK_NE(kResultHistogram, data->name);
}
g_allocator = nullptr;
......
......@@ -56,8 +56,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager {
// Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator.
template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
return allocator_->GetAsObject<T>(ref, type_id);
T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return allocator_->GetAsObject<T>(ref);
}
private:
......@@ -131,8 +131,8 @@ class BASE_EXPORT PersistentSampleMapRecords {
// cleanliness of the interface), a template is defined that will be
// resolved when used inside that file.
template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
return data_manager_->GetAsObject<T>(ref, type_id);
T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return data_manager_->GetAsObject<T>(ref);
}
private:
......
......@@ -750,10 +750,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
// Validation of parameters.
if (ref % kAllocAlignment != 0)
return nullptr;
if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
return nullptr;
if (ref % kAllocAlignment != 0)
return nullptr;
size += sizeof(BlockHeader);
if (ref + size > mem_size_)
return nullptr;
......
This diff is collapsed.
......@@ -40,12 +40,14 @@ class PersistentMemoryAllocatorTest : public testing::Test {
uint32_t kAllocAlignment;
struct TestObject1 {
static constexpr uint32_t kPersistentTypeId = 1;
static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
int32_t onething;
char oranother;
};
struct TestObject2 {
static constexpr uint32_t kPersistentTypeId = 2;
static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
int64_t thiis;
int32_t that;
......@@ -109,10 +111,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Validate allocation of test object and make sure it can be referenced
// and all metadata looks correct.
Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
EXPECT_NE(0U, block1);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
TestObject1* obj1 = allocator_->AllocateObject<TestObject1>();
ASSERT_TRUE(obj1);
Reference block1 = allocator_->GetAsReference(obj1);
ASSERT_NE(0U, block1);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
allocator_->GetAllocSize(block1));
......@@ -147,10 +151,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Create second test-object and ensure everything is good and it cannot
// be confused with test-object of another type.
Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
EXPECT_NE(0U, block2);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
TestObject2* obj2 = allocator_->AllocateObject<TestObject2>();
ASSERT_TRUE(obj2);
Reference block2 = allocator_->GetAsReference(obj2);
ASSERT_NE(0U, block2);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
allocator_->GetAllocSize(block2));
......@@ -160,7 +166,7 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_GT(meminfo1.free, meminfo2.free);
// Ensure that second test-object can also be made iterable.
allocator_->MakeIterable(block2);
allocator_->MakeIterable(obj2);
EXPECT_EQ(block2, iter1a.GetNext(&type));
EXPECT_EQ(2U, type);
EXPECT_EQ(block2, iter1a.GetLast());
......@@ -214,11 +220,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(1, allocs_samples->GetCount(0));
#endif
// Check that an objcet's type can be changed.
// Check that an object's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2));
allocator_->ChangeType(block2, 3, 2);
EXPECT_EQ(3U, allocator_->GetType(block2));
allocator_->ChangeType(block2, 2, 3);
allocator_->ChangeObject<TestObject2>(block2, 3);
EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment.
......@@ -235,8 +241,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter2.GetNext(&type));
EXPECT_EQ(block2, iter2.GetNext(&type));
EXPECT_EQ(0U, iter2.GetNext(&type));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
// Create a third allocator (read-only) using the same memory segment.
std::unique_ptr<const PersistentMemoryAllocator> allocator3(
......@@ -251,13 +257,23 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter3.GetNext(&type));
EXPECT_EQ(block2, iter3.GetNext(&type));
EXPECT_EQ(0U, iter3.GetNext(&type));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
// Ensure that GetNextOfType works.
PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
EXPECT_EQ(block2, iter1c.GetNextOfType(2));
EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
EXPECT_EQ(0U, iter1c.GetNextOfType(2));
// Ensure that GetNextOfObject works.
PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
// Ensure that deleting an object works.
allocator_->DeleteObject(obj2);
PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
......
......@@ -82,6 +82,9 @@ void PersistentSampleMapIterator::SkipEmptyBuckets() {
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
// SHA1(SampleRecord): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 16;
......@@ -90,9 +93,6 @@ struct SampleRecord {
Count count; // The count associated with the above value.
};
// The type-id used to identify sample records inside an allocator.
const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1
} // namespace
PersistentSampleMap::PersistentSampleMap(
......@@ -144,15 +144,12 @@ PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id) {
PersistentMemoryAllocator::Reference ref =
iterator.GetNextOfType(kTypeIdSampleRecord);
const SampleRecord* record =
iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
if (!record)
return 0;
*sample_map_id = record->id;
return ref;
return iterator.GetAsReference(record);
}
// static
......@@ -161,11 +158,7 @@ PersistentSampleMap::CreatePersistentRecord(
PersistentMemoryAllocator* allocator,
uint64_t sample_map_id,
Sample value) {
PersistentMemoryAllocator::Reference ref =
allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
SampleRecord* record =
allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
SampleRecord* record = allocator->AllocateObject<SampleRecord>();
if (!record) {
NOTREACHED() << "full=" << allocator->IsFull()
<< ", corrupt=" << allocator->IsCorrupt();
......@@ -175,6 +168,8 @@ PersistentSampleMap::CreatePersistentRecord(
record->id = sample_map_id;
record->value = value;
record->count = 0;
PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
allocator->MakeIterable(ref);
return ref;
}
......@@ -256,8 +251,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value,
PersistentMemoryAllocator::Reference ref;
PersistentSampleMapRecords* records = GetRecords();
while ((ref = records->GetNext()) != 0) {
SampleRecord* record =
records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
if (!record)
continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment