Commit b45f8a74 authored by reveman@chromium.org

base: Use DiscardableMemoryManager on MacOSX.

This limits the address space used for discardable memory on MacOSX
by using the DiscardableMemoryManager for userspace eviction.

Includes some minor refactoring to keep all code using
DiscardableMemoryManager consistent.

BUG=369246

Review URL: https://codereview.chromium.org/261993003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@269640 0039d316-1c4b-4281-b951-d872f2087c98
parent 5079b2b9
...@@ -19,31 +19,27 @@ namespace { ...@@ -19,31 +19,27 @@ namespace {
const char kAshmemAllocatorName[] = "DiscardableMemoryAshmemAllocator"; const char kAshmemAllocatorName[] = "DiscardableMemoryAshmemAllocator";
// When ashmem is used, have the DiscardableMemoryManager trigger userspace // For Ashmem, have the DiscardableMemoryManager trigger userspace eviction
// eviction when address space usage gets too high (e.g. 512 MBytes). // when address space usage gets too high (e.g. 512 MBytes).
const size_t kAshmemMaxAddressSpaceUsage = 512 * 1024 * 1024; const size_t kAshmemMemoryLimit = 512 * 1024 * 1024;
// Holds the state used for ashmem allocations.
struct AshmemGlobalContext {
AshmemGlobalContext()
: allocator(kAshmemAllocatorName,
GetOptimalAshmemRegionSizeForAllocator()) {
manager.SetMemoryLimit(kAshmemMaxAddressSpaceUsage);
}
internal::DiscardableMemoryAshmemAllocator allocator;
internal::DiscardableMemoryManager manager;
private: size_t GetOptimalAshmemRegionSizeForAllocator() {
// Returns 64 MBytes for a 512 MBytes device, 128 MBytes for 1024 MBytes...
static size_t GetOptimalAshmemRegionSizeForAllocator() {
// Note that this may do some I/O (without hitting the disk though) so it // Note that this may do some I/O (without hitting the disk though) so it
// should not be called on the critical path. // should not be called on the critical path.
return base::android::SysUtils::AmountOfPhysicalMemoryKB() * 1024 / 8; return base::android::SysUtils::AmountOfPhysicalMemoryKB() * 1024 / 8;
} }
};
// Holds the shared state used for allocations.
struct SharedState {
SharedState()
: manager(kAshmemMemoryLimit, kAshmemMemoryLimit),
allocator(kAshmemAllocatorName,
GetOptimalAshmemRegionSizeForAllocator()) {}
LazyInstance<AshmemGlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; internal::DiscardableMemoryManager manager;
internal::DiscardableMemoryAshmemAllocator allocator;
};
LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
} // namespace } // namespace
...@@ -76,10 +72,10 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType( ...@@ -76,10 +72,10 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
case DISCARDABLE_MEMORY_TYPE_MAC: case DISCARDABLE_MEMORY_TYPE_MAC:
return scoped_ptr<DiscardableMemory>(); return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_ASHMEM: { case DISCARDABLE_MEMORY_TYPE_ASHMEM: {
AshmemGlobalContext* const global_context = g_context.Pointer(); SharedState* const shared_state = g_shared_state.Pointer();
scoped_ptr<internal::DiscardableMemoryAshmem> memory( scoped_ptr<internal::DiscardableMemoryAshmem> memory(
new internal::DiscardableMemoryAshmem( new internal::DiscardableMemoryAshmem(
size, &global_context->allocator, &global_context->manager)); size, &shared_state->allocator, &shared_state->manager));
if (!memory->Initialize()) if (!memory->Initialize())
return scoped_ptr<DiscardableMemory>(); return scoped_ptr<DiscardableMemory>();
...@@ -109,7 +105,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType( ...@@ -109,7 +105,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
// static // static
void DiscardableMemory::PurgeForTesting() { void DiscardableMemory::PurgeForTesting() {
g_context.Pointer()->manager.PurgeAll(); g_shared_state.Pointer()->manager.PurgeAll();
internal::DiscardableMemoryEmulated::PurgeForTesting(); internal::DiscardableMemoryEmulated::PurgeForTesting();
} }
......
...@@ -10,8 +10,19 @@ ...@@ -10,8 +10,19 @@
namespace base { namespace base {
namespace { namespace {
base::LazyInstance<internal::DiscardableMemoryManager>::Leaky g_manager = // This is admittedly pretty magical. It's approximately enough memory for eight
LAZY_INSTANCE_INITIALIZER; // 2560x1600 images.
const size_t kEmulatedMemoryLimit = 128 * 1024 * 1024;
const size_t kEmulatedBytesToKeepUnderModeratePressure = 12 * 1024 * 1024;
struct SharedState {
SharedState()
: manager(kEmulatedMemoryLimit,
kEmulatedBytesToKeepUnderModeratePressure) {}
internal::DiscardableMemoryManager manager;
};
LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
} // namespace } // namespace
...@@ -20,28 +31,28 @@ namespace internal { ...@@ -20,28 +31,28 @@ namespace internal {
DiscardableMemoryEmulated::DiscardableMemoryEmulated(size_t bytes) DiscardableMemoryEmulated::DiscardableMemoryEmulated(size_t bytes)
: bytes_(bytes), : bytes_(bytes),
is_locked_(false) { is_locked_(false) {
g_manager.Pointer()->Register(this, bytes); g_shared_state.Pointer()->manager.Register(this, bytes);
} }
DiscardableMemoryEmulated::~DiscardableMemoryEmulated() { DiscardableMemoryEmulated::~DiscardableMemoryEmulated() {
if (is_locked_) if (is_locked_)
Unlock(); Unlock();
g_manager.Pointer()->Unregister(this); g_shared_state.Pointer()->manager.Unregister(this);
} }
// static // static
void DiscardableMemoryEmulated::RegisterMemoryPressureListeners() { void DiscardableMemoryEmulated::RegisterMemoryPressureListeners() {
g_manager.Pointer()->RegisterMemoryPressureListener(); g_shared_state.Pointer()->manager.RegisterMemoryPressureListener();
} }
// static // static
void DiscardableMemoryEmulated::UnregisterMemoryPressureListeners() { void DiscardableMemoryEmulated::UnregisterMemoryPressureListeners() {
g_manager.Pointer()->UnregisterMemoryPressureListener(); g_shared_state.Pointer()->manager.UnregisterMemoryPressureListener();
} }
// static // static
void DiscardableMemoryEmulated::PurgeForTesting() { void DiscardableMemoryEmulated::PurgeForTesting() {
g_manager.Pointer()->PurgeAll(); g_shared_state.Pointer()->manager.PurgeAll();
} }
bool DiscardableMemoryEmulated::Initialize() { bool DiscardableMemoryEmulated::Initialize() {
...@@ -52,7 +63,7 @@ DiscardableMemoryLockStatus DiscardableMemoryEmulated::Lock() { ...@@ -52,7 +63,7 @@ DiscardableMemoryLockStatus DiscardableMemoryEmulated::Lock() {
DCHECK(!is_locked_); DCHECK(!is_locked_);
bool purged = false; bool purged = false;
if (!g_manager.Pointer()->AcquireLock(this, &purged)) if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged))
return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED; return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
is_locked_ = true; is_locked_ = true;
...@@ -62,7 +73,7 @@ DiscardableMemoryLockStatus DiscardableMemoryEmulated::Lock() { ...@@ -62,7 +73,7 @@ DiscardableMemoryLockStatus DiscardableMemoryEmulated::Lock() {
void DiscardableMemoryEmulated::Unlock() { void DiscardableMemoryEmulated::Unlock() {
DCHECK(is_locked_); DCHECK(is_locked_);
g_manager.Pointer()->ReleaseLock(this); g_shared_state.Pointer()->manager.ReleaseLock(this);
is_locked_ = false; is_locked_ = false;
} }
......
...@@ -9,80 +9,123 @@ ...@@ -9,80 +9,123 @@
#include "base/basictypes.h" #include "base/basictypes.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h" #include "base/logging.h"
#include "base/memory/discardable_memory_emulated.h" #include "base/memory/discardable_memory_emulated.h"
#include "base/memory/discardable_memory_malloc.h" #include "base/memory/discardable_memory_malloc.h"
#include "base/memory/discardable_memory_manager.h"
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
namespace base { namespace base {
namespace { namespace {
// For Mac, have the DiscardableMemoryManager trigger userspace eviction when
// address space usage gets too high (e.g. 512 MBytes).
const size_t kMacMemoryLimit = 512 * 1024 * 1024;
struct SharedState {
SharedState() : manager(kMacMemoryLimit, kMacMemoryLimit) {}
internal::DiscardableMemoryManager manager;
};
LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
// The VM subsystem allows tagging of memory and 240-255 is reserved for // The VM subsystem allows tagging of memory and 240-255 is reserved for
// application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic // application use (see mach/vm_statistics.h). Pick 252 (after chromium's atomic
// weight of ~52). // weight of ~52).
const int kDiscardableMemoryTag = VM_MAKE_TAG(252); const int kDiscardableMemoryTag = VM_MAKE_TAG(252);
class DiscardableMemoryMac : public DiscardableMemory { class DiscardableMemoryMac
: public DiscardableMemory,
public internal::DiscardableMemoryManagerAllocation {
public: public:
explicit DiscardableMemoryMac(size_t size) explicit DiscardableMemoryMac(size_t bytes)
: buffer_(0), : buffer_(0), bytes_(bytes), is_locked_(false) {
size_(size) { g_shared_state.Pointer()->manager.Register(this, bytes);
} }
bool Initialize() { bool Initialize() { return Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED; }
kern_return_t ret = vm_allocate(mach_task_self(),
&buffer_,
size_,
VM_FLAGS_PURGABLE |
VM_FLAGS_ANYWHERE |
kDiscardableMemoryTag);
if (ret != KERN_SUCCESS) {
DLOG(ERROR) << "vm_allocate() failed";
return false;
}
return true;
}
virtual ~DiscardableMemoryMac() { virtual ~DiscardableMemoryMac() {
if (is_locked_)
Unlock();
g_shared_state.Pointer()->manager.Unregister(this);
if (buffer_) if (buffer_)
vm_deallocate(mach_task_self(), buffer_, size_); vm_deallocate(mach_task_self(), buffer_, bytes_);
} }
// Overridden from DiscardableMemory:
virtual DiscardableMemoryLockStatus Lock() OVERRIDE { virtual DiscardableMemoryLockStatus Lock() OVERRIDE {
DCHECK_EQ(0, mprotect(reinterpret_cast<void*>(buffer_), DCHECK(!is_locked_);
size_,
PROT_READ | PROT_WRITE)); bool purged = false;
if (!g_shared_state.Pointer()->manager.AcquireLock(this, &purged))
return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
is_locked_ = true;
return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
: DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
}
virtual void Unlock() OVERRIDE {
DCHECK(is_locked_);
g_shared_state.Pointer()->manager.ReleaseLock(this);
is_locked_ = false;
}
virtual void* Memory() const OVERRIDE {
DCHECK(is_locked_);
return reinterpret_cast<void*>(buffer_);
}
// Overridden from internal::DiscardableMemoryManagerAllocation:
virtual bool AllocateAndAcquireLock() OVERRIDE {
bool persistent = true;
if (!buffer_) {
kern_return_t ret = vm_allocate(
mach_task_self(),
&buffer_,
bytes_,
VM_FLAGS_PURGABLE | VM_FLAGS_ANYWHERE | kDiscardableMemoryTag);
CHECK_EQ(KERN_SUCCESS, ret) << "wm_allocate() failed.";
persistent = false;
}
#if !defined(NDEBUG)
int status = mprotect(
reinterpret_cast<void*>(buffer_), bytes_, PROT_READ | PROT_WRITE);
DCHECK_EQ(0, status);
#endif
int state = VM_PURGABLE_NONVOLATILE; int state = VM_PURGABLE_NONVOLATILE;
kern_return_t ret = vm_purgable_control(mach_task_self(), kern_return_t ret = vm_purgable_control(mach_task_self(),
buffer_, buffer_,
VM_PURGABLE_SET_STATE, VM_PURGABLE_SET_STATE,
&state); &state);
if (ret != KERN_SUCCESS) CHECK_EQ(KERN_SUCCESS, ret) << "Failed to lock memory.";
return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED; if (state & VM_PURGABLE_EMPTY)
persistent = false;
return state & VM_PURGABLE_EMPTY ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED return persistent;
: DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
} }
virtual void ReleaseLock() OVERRIDE {
virtual void Unlock() OVERRIDE {
int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT; int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_DEFAULT;
kern_return_t ret = vm_purgable_control(mach_task_self(), kern_return_t ret = vm_purgable_control(mach_task_self(),
buffer_, buffer_,
VM_PURGABLE_SET_STATE, VM_PURGABLE_SET_STATE,
&state); &state);
DCHECK_EQ(0, mprotect(reinterpret_cast<void*>(buffer_), size_, PROT_NONE)); CHECK_EQ(KERN_SUCCESS, ret) << "Failed to unlock memory.";
if (ret != KERN_SUCCESS) #if !defined(NDEBUG)
DLOG(ERROR) << "Failed to unlock memory."; int status = mprotect(reinterpret_cast<void*>(buffer_), bytes_, PROT_NONE);
DCHECK_EQ(0, status);
#endif
}
virtual void Purge() OVERRIDE {
if (buffer_) {
vm_deallocate(mach_task_self(), buffer_, bytes_);
buffer_ = 0;
} }
virtual void* Memory() const OVERRIDE {
return reinterpret_cast<void*>(buffer_);
} }
private: private:
vm_address_t buffer_; vm_address_t buffer_;
const size_t size_; const size_t bytes_;
bool is_locked_;
DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMac); DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryMac);
}; };
......
...@@ -12,21 +12,15 @@ ...@@ -12,21 +12,15 @@
namespace base { namespace base {
namespace internal { namespace internal {
namespace {
// This is admittedly pretty magical. It's approximately enough memory for eight DiscardableMemoryManager::DiscardableMemoryManager(
// 2560x1600 images. size_t memory_limit,
static const size_t kDefaultMemoryLimit = 128 * 1024 * 1024; size_t bytes_to_keep_under_moderate_pressure)
static const size_t kDefaultBytesToKeepUnderModeratePressure = 12 * 1024 * 1024;
} // namespace
DiscardableMemoryManager::DiscardableMemoryManager()
: allocations_(AllocationMap::NO_AUTO_EVICT), : allocations_(AllocationMap::NO_AUTO_EVICT),
bytes_allocated_(0), bytes_allocated_(0),
memory_limit_(kDefaultMemoryLimit), memory_limit_(memory_limit),
bytes_to_keep_under_moderate_pressure_( bytes_to_keep_under_moderate_pressure_(
kDefaultBytesToKeepUnderModeratePressure) { bytes_to_keep_under_moderate_pressure) {
BytesAllocatedChanged(); BytesAllocatedChanged();
} }
......
...@@ -67,7 +67,8 @@ class BASE_EXPORT_PRIVATE DiscardableMemoryManager { ...@@ -67,7 +67,8 @@ class BASE_EXPORT_PRIVATE DiscardableMemoryManager {
public: public:
typedef DiscardableMemoryManagerAllocation Allocation; typedef DiscardableMemoryManagerAllocation Allocation;
DiscardableMemoryManager(); DiscardableMemoryManager(size_t memory_limit,
size_t bytes_to_keep_under_moderate_pressure);
~DiscardableMemoryManager(); ~DiscardableMemoryManager();
// Call this to register memory pressure listener. Must be called on a thread // Call this to register memory pressure listener. Must be called on a thread
......
...@@ -42,9 +42,16 @@ class TestAllocationImpl : public internal::DiscardableMemoryManagerAllocation { ...@@ -42,9 +42,16 @@ class TestAllocationImpl : public internal::DiscardableMemoryManagerAllocation {
bool is_locked_; bool is_locked_;
}; };
// Tests can assume that the default limit is at least 1024. Tests that rely on
// something else need to explicitly set the limit.
const size_t kDefaultMemoryLimit = 1024;
const size_t kDefaultBytesToKeepUnderModeratePressure = kDefaultMemoryLimit;
class DiscardableMemoryManagerTestBase { class DiscardableMemoryManagerTestBase {
public: public:
DiscardableMemoryManagerTestBase() { DiscardableMemoryManagerTestBase()
: manager_(kDefaultMemoryLimit,
kDefaultBytesToKeepUnderModeratePressure) {
manager_.RegisterMemoryPressureListener(); manager_.RegisterMemoryPressureListener();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment