Commit 3f999d3d authored by bcwhite's avatar bcwhite Committed by Commit bot

Improved support for objects inside persistent memory.

Some compilers, including MSVC, don't consider std::atomic a POD-
compatible type, which makes it impossible to include such members in
persistent memory when using the object interface -- something which is
obviously desirable.

New "object" management embeds the type directly in the class/struct
definition and runs the default constructor of the type after allocation,
thus making the allocator compatible with a much greater range of types.

BUG=546019

Review-Url: https://codereview.chromium.org/2578323002
Cr-Commit-Position: refs/heads/master@{#442884}
parent 33819cef
...@@ -1022,7 +1022,7 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { ...@@ -1022,7 +1022,7 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
// TODO(bcwhite): Review this after major compiler releases. // TODO(bcwhite): Review this after major compiler releases.
DCHECK(mem_reference); DCHECK(mem_reference);
void* mem_base; void* mem_base;
#if !defined(OS_WIN) && !defined(OS_ANDROID) #if 0 // TODO(bcwhite): Update this for new GetAsObject functionality.
mem_base = allocator_->GetAsObject<ThreadActivityTracker::Header>( mem_base = allocator_->GetAsObject<ThreadActivityTracker::Header>(
mem_reference, kTypeIdActivityTracker); mem_reference, kTypeIdActivityTracker);
#else #else
......
...@@ -28,12 +28,12 @@ FeatureList* g_instance = nullptr; ...@@ -28,12 +28,12 @@ FeatureList* g_instance = nullptr;
// Tracks whether the FeatureList instance was initialized via an accessor. // Tracks whether the FeatureList instance was initialized via an accessor.
bool g_initialized_from_accessor = false; bool g_initialized_from_accessor = false;
const uint32_t kFeatureType = 0x06567CA6 + 1; // SHA1(FeatureEntry) v1
// An allocator entry for a feature in shared memory. The FeatureEntry is // An allocator entry for a feature in shared memory. The FeatureEntry is
// followed by a base::Pickle object that contains the feature and trial name. // followed by a base::Pickle object that contains the feature and trial name.
// Any changes to this structure requires a bump in kFeatureType defined above.
struct FeatureEntry { struct FeatureEntry {
// SHA1(FeatureEntry): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
// Expected size for 32/64-bit check. // Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 8; static constexpr size_t kExpectedInstanceSize = 8;
...@@ -98,13 +98,8 @@ void FeatureList::InitializeFromSharedMemory( ...@@ -98,13 +98,8 @@ void FeatureList::InitializeFromSharedMemory(
DCHECK(!initialized_); DCHECK(!initialized_);
PersistentMemoryAllocator::Iterator iter(allocator); PersistentMemoryAllocator::Iterator iter(allocator);
const FeatureEntry* entry;
PersistentMemoryAllocator::Reference ref; while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
while ((ref = iter.GetNextOfType(kFeatureType)) !=
PersistentMemoryAllocator::kReferenceNull) {
const FeatureEntry* entry =
allocator->GetAsObject<const FeatureEntry>(ref, kFeatureType);
OverrideState override_state = OverrideState override_state =
static_cast<OverrideState>(entry->override_state); static_cast<OverrideState>(entry->override_state);
...@@ -170,20 +165,17 @@ void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) { ...@@ -170,20 +165,17 @@ void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
pickle.WriteString(override.second.field_trial->trial_name()); pickle.WriteString(override.second.field_trial->trial_name());
size_t total_size = sizeof(FeatureEntry) + pickle.size(); size_t total_size = sizeof(FeatureEntry) + pickle.size();
PersistentMemoryAllocator::Reference ref = FeatureEntry* entry = allocator->AllocateObject<FeatureEntry>(total_size);
allocator->Allocate(total_size, kFeatureType); if (!entry)
if (!ref)
return; return;
FeatureEntry* entry =
allocator->GetAsObject<FeatureEntry>(ref, kFeatureType);
entry->override_state = override.second.overridden_state; entry->override_state = override.second.overridden_state;
entry->pickle_size = pickle.size(); entry->pickle_size = pickle.size();
char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry); char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
memcpy(dst, pickle.data(), pickle.size()); memcpy(dst, pickle.data(), pickle.size());
allocator->MakeIterable(ref); allocator->MakeIterable(entry);
} }
} }
......
...@@ -55,7 +55,6 @@ const bool kUseSharedMemoryForFieldTrials = true; ...@@ -55,7 +55,6 @@ const bool kUseSharedMemoryForFieldTrials = true;
// Constants for the field trial allocator. // Constants for the field trial allocator.
const char kAllocatorName[] = "FieldTrialAllocator"; const char kAllocatorName[] = "FieldTrialAllocator";
const uint32_t kFieldTrialType = 0xABA17E13 + 2; // SHA1(FieldTrialEntry) v2
// We allocate 128 KiB to hold all the field trial data. This should be enough, // We allocate 128 KiB to hold all the field trial data. This should be enough,
// as most people use 3 - 25 KiB for field trials (as of 11/25/2016). // as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
...@@ -717,13 +716,9 @@ void FieldTrialList::GetInitiallyActiveFieldTrials( ...@@ -717,13 +716,9 @@ void FieldTrialList::GetInitiallyActiveFieldTrials(
FieldTrialAllocator* allocator = global_->field_trial_allocator_.get(); FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
FieldTrialAllocator::Iterator mem_iter(allocator); FieldTrialAllocator::Iterator mem_iter(allocator);
FieldTrial::FieldTrialRef ref; const FieldTrial::FieldTrialEntry* entry;
while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) != while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
SharedPersistentMemoryAllocator::kReferenceNull) { nullptr) {
const FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
StringPiece trial_name; StringPiece trial_name;
StringPiece group_name; StringPiece group_name;
if (subtle::NoBarrier_Load(&entry->activated) && if (subtle::NoBarrier_Load(&entry->activated) &&
...@@ -1042,9 +1037,8 @@ bool FieldTrialList::GetParamsFromSharedMemory( ...@@ -1042,9 +1037,8 @@ bool FieldTrialList::GetParamsFromSharedMemory(
return false; return false;
const FieldTrial::FieldTrialEntry* entry = const FieldTrial::FieldTrialEntry* entry =
global_->field_trial_allocator_ global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
->GetAsObject<const FieldTrial::FieldTrialEntry>(field_trial->ref_, field_trial->ref_);
kFieldTrialType);
size_t allocated_size = size_t allocated_size =
global_->field_trial_allocator_->GetAllocSize(field_trial->ref_); global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
...@@ -1075,12 +1069,11 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() { ...@@ -1075,12 +1069,11 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
std::vector<FieldTrial::FieldTrialRef> new_refs; std::vector<FieldTrial::FieldTrialRef> new_refs;
FieldTrial::FieldTrialRef prev_ref; FieldTrial::FieldTrialRef prev_ref;
while ((prev_ref = mem_iter.GetNextOfType(kFieldTrialType)) != while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
FieldTrialAllocator::kReferenceNull) { FieldTrialAllocator::kReferenceNull) {
// Get the existing field trial entry in shared memory. // Get the existing field trial entry in shared memory.
const FieldTrial::FieldTrialEntry* prev_entry = const FieldTrial::FieldTrialEntry* prev_entry =
allocator->GetAsObject<const FieldTrial::FieldTrialEntry>( allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
prev_ref, kFieldTrialType);
StringPiece trial_name; StringPiece trial_name;
StringPiece group_name; StringPiece group_name;
if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name)) if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
...@@ -1091,11 +1084,8 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() { ...@@ -1091,11 +1084,8 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
pickle.WriteString(trial_name); pickle.WriteString(trial_name);
pickle.WriteString(group_name); pickle.WriteString(group_name);
size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size(); size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
FieldTrial::FieldTrialRef new_ref =
allocator->Allocate(total_size, kFieldTrialType);
FieldTrial::FieldTrialEntry* new_entry = FieldTrial::FieldTrialEntry* new_entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(new_ref, allocator->AllocateObject<FieldTrial::FieldTrialEntry>(total_size);
kFieldTrialType);
subtle::NoBarrier_Store(&new_entry->activated, subtle::NoBarrier_Store(&new_entry->activated,
subtle::NoBarrier_Load(&prev_entry->activated)); subtle::NoBarrier_Load(&prev_entry->activated));
new_entry->pickle_size = pickle.size(); new_entry->pickle_size = pickle.size();
...@@ -1108,12 +1098,14 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() { ...@@ -1108,12 +1098,14 @@ void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
// Update the ref on the field trial and add it to the list to be made // Update the ref on the field trial and add it to the list to be made
// iterable. // iterable.
FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
FieldTrial* trial = global_->PreLockedFind(trial_name.as_string()); FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
trial->ref_ = new_ref; trial->ref_ = new_ref;
new_refs.push_back(new_ref); new_refs.push_back(new_ref);
// Mark the existing entry as unused. // Mark the existing entry as unused.
allocator->ChangeType(prev_ref, 0, kFieldTrialType); allocator->ChangeType(prev_ref, 0,
FieldTrial::FieldTrialEntry::kPersistentTypeId);
} }
for (const auto& ref : new_refs) { for (const auto& ref : new_refs) {
...@@ -1137,13 +1129,10 @@ std::vector<const FieldTrial::FieldTrialEntry*> ...@@ -1137,13 +1129,10 @@ std::vector<const FieldTrial::FieldTrialEntry*>
FieldTrialList::GetAllFieldTrialsFromPersistentAllocator( FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
PersistentMemoryAllocator const& allocator) { PersistentMemoryAllocator const& allocator) {
std::vector<const FieldTrial::FieldTrialEntry*> entries; std::vector<const FieldTrial::FieldTrialEntry*> entries;
FieldTrial::FieldTrialRef ref;
FieldTrialAllocator::Iterator iter(&allocator); FieldTrialAllocator::Iterator iter(&allocator);
while ((ref = iter.GetNextOfType(kFieldTrialType)) != const FieldTrial::FieldTrialEntry* entry;
FieldTrialAllocator::kReferenceNull) { while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
const FieldTrial::FieldTrialEntry* entry = nullptr) {
allocator.GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
entries.push_back(entry); entries.push_back(entry);
} }
return entries; return entries;
...@@ -1182,13 +1171,9 @@ bool FieldTrialList::CreateTrialsFromSharedMemory( ...@@ -1182,13 +1171,9 @@ bool FieldTrialList::CreateTrialsFromSharedMemory(
FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get(); FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
FieldTrialAllocator::Iterator mem_iter(shalloc); FieldTrialAllocator::Iterator mem_iter(shalloc);
FieldTrial::FieldTrialRef ref; const FieldTrial::FieldTrialEntry* entry;
while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) != while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
FieldTrialAllocator::kReferenceNull) { nullptr) {
const FieldTrial::FieldTrialEntry* entry =
shalloc->GetAsObject<const FieldTrial::FieldTrialEntry>(
ref, kFieldTrialType);
StringPiece trial_name; StringPiece trial_name;
StringPiece group_name; StringPiece group_name;
if (!entry->GetTrialAndGroupName(&trial_name, &group_name)) if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
...@@ -1199,7 +1184,7 @@ bool FieldTrialList::CreateTrialsFromSharedMemory( ...@@ -1199,7 +1184,7 @@ bool FieldTrialList::CreateTrialsFromSharedMemory(
FieldTrial* trial = FieldTrial* trial =
CreateFieldTrial(trial_name.as_string(), group_name.as_string()); CreateFieldTrial(trial_name.as_string(), group_name.as_string());
trial->ref_ = ref; trial->ref_ = mem_iter.GetAsReference(entry);
if (subtle::NoBarrier_Load(&entry->activated)) { if (subtle::NoBarrier_Load(&entry->activated)) {
// Call |group()| to mark the trial as "used" and notify observers, if // Call |group()| to mark the trial as "used" and notify observers, if
// any. This is useful to ensure that field trials created in child // any. This is useful to ensure that field trials created in child
...@@ -1286,15 +1271,15 @@ void FieldTrialList::AddToAllocatorWhileLocked( ...@@ -1286,15 +1271,15 @@ void FieldTrialList::AddToAllocatorWhileLocked(
} }
size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size(); size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
FieldTrial::FieldTrialRef ref = FieldTrial::FieldTrialRef ref = allocator->Allocate(
allocator->Allocate(total_size, kFieldTrialType); total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
if (ref == FieldTrialAllocator::kReferenceNull) { if (ref == FieldTrialAllocator::kReferenceNull) {
NOTREACHED(); NOTREACHED();
return; return;
} }
FieldTrial::FieldTrialEntry* entry = FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref, kFieldTrialType); allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
subtle::NoBarrier_Store(&entry->activated, trial_state.activated); subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
entry->pickle_size = pickle.size(); entry->pickle_size = pickle.size();
...@@ -1328,8 +1313,7 @@ void FieldTrialList::ActivateFieldTrialEntryWhileLocked( ...@@ -1328,8 +1313,7 @@ void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
// the only thing that happens on a stale read here is a slight performance // the only thing that happens on a stale read here is a slight performance
// hit from the child re-synchronizing activation state. // hit from the child re-synchronizing activation state.
FieldTrial::FieldTrialEntry* entry = FieldTrial::FieldTrialEntry* entry =
allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref, allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
kFieldTrialType);
subtle::NoBarrier_Store(&entry->activated, 1); subtle::NoBarrier_Store(&entry->activated, 1);
} }
} }
......
...@@ -137,9 +137,11 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> { ...@@ -137,9 +137,11 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// We create one FieldTrialEntry per field trial in shared memory, via // We create one FieldTrialEntry per field trial in shared memory, via
// AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
// base::Pickle object that we unpickle and read from. Any changes to this // base::Pickle object that we unpickle and read from.
// structure requires a bump in kFieldTrialType id defined in the .cc file.
struct BASE_EXPORT FieldTrialEntry { struct BASE_EXPORT FieldTrialEntry {
// SHA1(FieldTrialEntry): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
// Expected size for 32/64-bit check. // Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 8; static constexpr size_t kExpectedInstanceSize = 8;
......
...@@ -35,11 +35,8 @@ const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result"; ...@@ -35,11 +35,8 @@ const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
// so that, if the structure of that object changes, stored older versions // so that, if the structure of that object changes, stored older versions
// will be safely ignored. // will be safely ignored.
enum : uint32_t { enum : uint32_t {
kTypeIdHistogram = 0xF1645910 + 3, // SHA1(Histogram) v3
kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
kTypeIdHistogramUnderConstruction = ~kTypeIdHistogram,
}; };
// The current globally-active persistent allocator for all new histograms. // The current globally-active persistent allocator for all new histograms.
...@@ -226,6 +223,9 @@ PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew( ...@@ -226,6 +223,9 @@ PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
// This data will be held in persistent memory in order for processes to // This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere. // locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData { struct PersistentHistogramAllocator::PersistentHistogramData {
// SHA1(Histogram): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
// Expected size for 32/64-bit check. // Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = static constexpr size_t kExpectedInstanceSize =
40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize; 40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
...@@ -254,7 +254,7 @@ PersistentHistogramAllocator::Iterator::Iterator( ...@@ -254,7 +254,7 @@ PersistentHistogramAllocator::Iterator::Iterator(
std::unique_ptr<HistogramBase> std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) { PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
PersistentMemoryAllocator::Reference ref; PersistentMemoryAllocator::Reference ref;
while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) { while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
if (ref != ignore) if (ref != ignore)
return allocator_->GetHistogram(ref); return allocator_->GetHistogram(ref);
} }
...@@ -277,8 +277,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram( ...@@ -277,8 +277,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// add it to the local list of known histograms (while these may be simple // add it to the local list of known histograms (while these may be simple
// references to histograms in other processes). // references to histograms in other processes).
PersistentHistogramData* histogram_data = PersistentHistogramData* histogram_data =
memory_allocator_->GetAsObject<PersistentHistogramData>( memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
ref, kTypeIdHistogram);
size_t length = memory_allocator_->GetAllocSize(ref); size_t length = memory_allocator_->GetAllocSize(ref);
// Check that metadata is reasonable: name is NUL terminated and non-empty, // Check that metadata is reasonable: name is NUL terminated and non-empty,
...@@ -319,13 +318,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram( ...@@ -319,13 +318,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// during the datafill doesn't leave a bad record around that could cause // during the datafill doesn't leave a bad record around that could cause
// confusion by another process trying to read it. It will be corrected // confusion by another process trying to read it. It will be corrected
// once histogram construction is complete. // once histogram construction is complete.
PersistentMemoryAllocator::Reference histogram_ref =
memory_allocator_->Allocate(
offsetof(PersistentHistogramData, name) + name.length() + 1,
kTypeIdHistogramUnderConstruction);
PersistentHistogramData* histogram_data = PersistentHistogramData* histogram_data =
memory_allocator_->GetAsObject<PersistentHistogramData>( memory_allocator_->AllocateObject<PersistentHistogramData>(
histogram_ref, kTypeIdHistogramUnderConstruction); offsetof(PersistentHistogramData, name) + name.length() + 1);
if (histogram_data) { if (histogram_data) {
memcpy(histogram_data->name, name.c_str(), name.size() + 1); memcpy(histogram_data->name, name.c_str(), name.size() + 1);
histogram_data->histogram_type = histogram_type; histogram_data->histogram_type = histogram_type;
...@@ -384,9 +379,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram( ...@@ -384,9 +379,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
DCHECK(histogram); DCHECK(histogram);
DCHECK_NE(0U, histogram_data->samples_metadata.id); DCHECK_NE(0U, histogram_data->samples_metadata.id);
DCHECK_NE(0U, histogram_data->logged_metadata.id); DCHECK_NE(0U, histogram_data->logged_metadata.id);
memory_allocator_->ChangeType(histogram_ref, kTypeIdHistogram,
kTypeIdHistogramUnderConstruction);
PersistentMemoryAllocator::Reference histogram_ref =
memory_allocator_->GetAsReference(histogram_data);
if (ref_ptr != nullptr) if (ref_ptr != nullptr)
*ref_ptr = histogram_ref; *ref_ptr = histogram_ref;
...@@ -415,15 +410,19 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram( ...@@ -415,15 +410,19 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
void PersistentHistogramAllocator::FinalizeHistogram(Reference ref, void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
bool registered) { bool registered) {
// If the created persistent histogram was registered then it needs to if (registered) {
// be marked as "iterable" in order to be found by other processes. // If the created persistent histogram was registered then it needs to
if (registered) // be marked as "iterable" in order to be found by other processes. This
// happens only after the histogram is fully formed so it's impossible for
// code iterating through the allocator to read a partially created record.
memory_allocator_->MakeIterable(ref); memory_allocator_->MakeIterable(ref);
// If it wasn't registered then a race condition must have caused } else {
// two to be created. The allocator does not support releasing the // If it wasn't registered then a race condition must have caused two to
// acquired memory so just change the type to be empty. // be created. The allocator does not support releasing the acquired memory
else // so just change the type to be empty.
memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram); memory_allocator_->ChangeType(ref, 0,
PersistentHistogramData::kPersistentTypeId);
}
} }
void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder( void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
...@@ -842,13 +841,9 @@ GlobalHistogramAllocator::ReleaseForTesting() { ...@@ -842,13 +841,9 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// Recorder forget about the histograms contained therein; otherwise, // Recorder forget about the histograms contained therein; otherwise,
// some operations will try to access them and the released memory. // some operations will try to access them and the released memory.
PersistentMemoryAllocator::Iterator iter(memory_allocator); PersistentMemoryAllocator::Iterator iter(memory_allocator);
PersistentMemoryAllocator::Reference ref; const PersistentHistogramData* data;
while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) { while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
PersistentHistogramData* histogram_data = StatisticsRecorder::ForgetHistogramForTesting(data->name);
memory_allocator->GetAsObject<PersistentHistogramData>(
ref, kTypeIdHistogram);
DCHECK(histogram_data);
StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
// If a test breaks here then a memory region containing a histogram // If a test breaks here then a memory region containing a histogram
// actively used by this code is being released back to the test. // actively used by this code is being released back to the test.
...@@ -857,7 +852,7 @@ GlobalHistogramAllocator::ReleaseForTesting() { ...@@ -857,7 +852,7 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// the method GetCreateHistogramResultHistogram() *before* setting // the method GetCreateHistogramResultHistogram() *before* setting
// the (temporary) memory allocator via SetGlobalAllocator() so that // the (temporary) memory allocator via SetGlobalAllocator() so that
// histogram is instead allocated from the process heap. // histogram is instead allocated from the process heap.
DCHECK_NE(kResultHistogram, histogram_data->name); DCHECK_NE(kResultHistogram, data->name);
} }
g_allocator = nullptr; g_allocator = nullptr;
......
...@@ -56,8 +56,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager { ...@@ -56,8 +56,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager {
// Convenience method that gets the object for a given reference so callers // Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator. // don't have to also keep their own pointer to the appropriate allocator.
template <typename T> template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) { T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return allocator_->GetAsObject<T>(ref, type_id); return allocator_->GetAsObject<T>(ref);
} }
private: private:
...@@ -131,8 +131,8 @@ class BASE_EXPORT PersistentSampleMapRecords { ...@@ -131,8 +131,8 @@ class BASE_EXPORT PersistentSampleMapRecords {
// cleanliness of the interface), a template is defined that will be // cleanliness of the interface), a template is defined that will be
// resolved when used inside that file. // resolved when used inside that file.
template <typename T> template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) { T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return data_manager_->GetAsObject<T>(ref, type_id); return data_manager_->GetAsObject<T>(ref);
} }
private: private:
......
...@@ -750,10 +750,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id, ...@@ -750,10 +750,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok, uint32_t size, bool queue_ok,
bool free_ok) const { bool free_ok) const {
// Validation of parameters. // Validation of parameters.
if (ref % kAllocAlignment != 0)
return nullptr;
if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
return nullptr; return nullptr;
if (ref % kAllocAlignment != 0)
return nullptr;
size += sizeof(BlockHeader); size += sizeof(BlockHeader);
if (ref + size > mem_size_) if (ref + size > mem_size_)
return nullptr; return nullptr;
......
This diff is collapsed.
...@@ -40,12 +40,14 @@ class PersistentMemoryAllocatorTest : public testing::Test { ...@@ -40,12 +40,14 @@ class PersistentMemoryAllocatorTest : public testing::Test {
uint32_t kAllocAlignment; uint32_t kAllocAlignment;
struct TestObject1 { struct TestObject1 {
static constexpr uint32_t kPersistentTypeId = 1;
static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3; static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
int32_t onething; int32_t onething;
char oranother; char oranother;
}; };
struct TestObject2 { struct TestObject2 {
static constexpr uint32_t kPersistentTypeId = 2;
static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8; static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
int64_t thiis; int64_t thiis;
int32_t that; int32_t that;
...@@ -109,10 +111,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -109,10 +111,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Validate allocation of test object and make sure it can be referenced // Validate allocation of test object and make sure it can be referenced
// and all metadata looks correct. // and all metadata looks correct.
Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1); TestObject1* obj1 = allocator_->AllocateObject<TestObject1>();
EXPECT_NE(0U, block1); ASSERT_TRUE(obj1);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1)); Reference block1 = allocator_->GetAsReference(obj1);
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1)); ASSERT_NE(0U, block1);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1)); EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
EXPECT_GT(sizeof(TestObject1) + kAllocAlignment, EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
allocator_->GetAllocSize(block1)); allocator_->GetAllocSize(block1));
...@@ -147,10 +151,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -147,10 +151,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Create second test-object and ensure everything is good and it cannot // Create second test-object and ensure everything is good and it cannot
// be confused with test-object of another type. // be confused with test-object of another type.
Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2); TestObject2* obj2 = allocator_->AllocateObject<TestObject2>();
EXPECT_NE(0U, block2); ASSERT_TRUE(obj2);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2)); Reference block2 = allocator_->GetAsReference(obj2);
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1)); ASSERT_NE(0U, block2);
EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2)); EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
EXPECT_GT(sizeof(TestObject2) + kAllocAlignment, EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
allocator_->GetAllocSize(block2)); allocator_->GetAllocSize(block2));
...@@ -160,7 +166,7 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -160,7 +166,7 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_GT(meminfo1.free, meminfo2.free); EXPECT_GT(meminfo1.free, meminfo2.free);
// Ensure that second test-object can also be made iterable. // Ensure that second test-object can also be made iterable.
allocator_->MakeIterable(block2); allocator_->MakeIterable(obj2);
EXPECT_EQ(block2, iter1a.GetNext(&type)); EXPECT_EQ(block2, iter1a.GetNext(&type));
EXPECT_EQ(2U, type); EXPECT_EQ(2U, type);
EXPECT_EQ(block2, iter1a.GetLast()); EXPECT_EQ(block2, iter1a.GetLast());
...@@ -214,11 +220,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -214,11 +220,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(1, allocs_samples->GetCount(0)); EXPECT_EQ(1, allocs_samples->GetCount(0));
#endif #endif
// Check that an objcet's type can be changed. // Check that an object's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2)); EXPECT_EQ(2U, allocator_->GetType(block2));
allocator_->ChangeType(block2, 3, 2); allocator_->ChangeType(block2, 3, 2);
EXPECT_EQ(3U, allocator_->GetType(block2)); EXPECT_EQ(3U, allocator_->GetType(block2));
allocator_->ChangeType(block2, 2, 3); allocator_->ChangeObject<TestObject2>(block2, 3);
EXPECT_EQ(2U, allocator_->GetType(block2)); EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment. // Create second allocator (read/write) using the same memory segment.
...@@ -235,8 +241,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -235,8 +241,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter2.GetNext(&type)); EXPECT_EQ(block1, iter2.GetNext(&type));
EXPECT_EQ(block2, iter2.GetNext(&type)); EXPECT_EQ(block2, iter2.GetNext(&type));
EXPECT_EQ(0U, iter2.GetNext(&type)); EXPECT_EQ(0U, iter2.GetNext(&type));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1)); EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2)); EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
// Create a third allocator (read-only) using the same memory segment. // Create a third allocator (read-only) using the same memory segment.
std::unique_ptr<const PersistentMemoryAllocator> allocator3( std::unique_ptr<const PersistentMemoryAllocator> allocator3(
...@@ -251,13 +257,23 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) { ...@@ -251,13 +257,23 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter3.GetNext(&type)); EXPECT_EQ(block1, iter3.GetNext(&type));
EXPECT_EQ(block2, iter3.GetNext(&type)); EXPECT_EQ(block2, iter3.GetNext(&type));
EXPECT_EQ(0U, iter3.GetNext(&type)); EXPECT_EQ(0U, iter3.GetNext(&type));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1)); EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2)); EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
// Ensure that GetNextOfType works. // Ensure that GetNextOfType works.
PersistentMemoryAllocator::Iterator iter1c(allocator_.get()); PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
EXPECT_EQ(block2, iter1c.GetNextOfType(2)); EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
EXPECT_EQ(0U, iter1c.GetNextOfType(2)); EXPECT_EQ(0U, iter1c.GetNextOfType(2));
// Ensure that GetNextOfObject works.
PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
// Ensure that deleting an object works.
allocator_->DeleteObject(obj2);
PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
} }
TEST_F(PersistentMemoryAllocatorTest, PageTest) { TEST_F(PersistentMemoryAllocatorTest, PageTest) {
......
...@@ -82,6 +82,9 @@ void PersistentSampleMapIterator::SkipEmptyBuckets() { ...@@ -82,6 +82,9 @@ void PersistentSampleMapIterator::SkipEmptyBuckets() {
// memory allocator. The "id" must be unique across all maps held by an // memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map. // allocator or they will get attached to the wrong sample map.
struct SampleRecord { struct SampleRecord {
// SHA1(SampleRecord): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
// Expected size for 32/64-bit check. // Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 16; static constexpr size_t kExpectedInstanceSize = 16;
...@@ -90,9 +93,6 @@ struct SampleRecord { ...@@ -90,9 +93,6 @@ struct SampleRecord {
Count count; // The count associated with the above value. Count count; // The count associated with the above value.
}; };
// The type-id used to identify sample records inside an allocator.
const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1
} // namespace } // namespace
PersistentSampleMap::PersistentSampleMap( PersistentSampleMap::PersistentSampleMap(
...@@ -144,15 +144,12 @@ PersistentMemoryAllocator::Reference ...@@ -144,15 +144,12 @@ PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord( PersistentSampleMap::GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator, PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id) { uint64_t* sample_map_id) {
PersistentMemoryAllocator::Reference ref = const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
iterator.GetNextOfType(kTypeIdSampleRecord);
const SampleRecord* record =
iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
if (!record) if (!record)
return 0; return 0;
*sample_map_id = record->id; *sample_map_id = record->id;
return ref; return iterator.GetAsReference(record);
} }
// static // static
...@@ -161,11 +158,7 @@ PersistentSampleMap::CreatePersistentRecord( ...@@ -161,11 +158,7 @@ PersistentSampleMap::CreatePersistentRecord(
PersistentMemoryAllocator* allocator, PersistentMemoryAllocator* allocator,
uint64_t sample_map_id, uint64_t sample_map_id,
Sample value) { Sample value) {
PersistentMemoryAllocator::Reference ref = SampleRecord* record = allocator->AllocateObject<SampleRecord>();
allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
SampleRecord* record =
allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
if (!record) { if (!record) {
NOTREACHED() << "full=" << allocator->IsFull() NOTREACHED() << "full=" << allocator->IsFull()
<< ", corrupt=" << allocator->IsCorrupt(); << ", corrupt=" << allocator->IsCorrupt();
...@@ -175,6 +168,8 @@ PersistentSampleMap::CreatePersistentRecord( ...@@ -175,6 +168,8 @@ PersistentSampleMap::CreatePersistentRecord(
record->id = sample_map_id; record->id = sample_map_id;
record->value = value; record->value = value;
record->count = 0; record->count = 0;
PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
allocator->MakeIterable(ref); allocator->MakeIterable(ref);
return ref; return ref;
} }
...@@ -256,8 +251,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value, ...@@ -256,8 +251,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value,
PersistentMemoryAllocator::Reference ref; PersistentMemoryAllocator::Reference ref;
PersistentSampleMapRecords* records = GetRecords(); PersistentSampleMapRecords* records = GetRecords();
while ((ref = records->GetNext()) != 0) { while ((ref = records->GetNext()) != 0) {
SampleRecord* record = SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
if (!record) if (!record)
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment