Commit 6e909fed authored by reveman, committed by Commit bot

content: Close in-process discardable memory segments.

This prevents in-process discardable shared memory instances
from requiring an open file descriptor.

BUG=489174
TEST=base_unittests --gtest_filter=DiscardableSharedMemory.Close

Review URL: https://codereview.chromium.org/1146103002

Cr-Commit-Position: refs/heads/master@{#330806}
parent 465933b5
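
For context, the usage pattern this change enables looks roughly like the sketch below: an in-process segment is created and mapped, its file descriptor is closed immediately, and all further use goes through the existing mapping until Unmap(). This is an illustrative sketch only, not code from this CL; the function name InProcessUsageSketch is made up, and it assumes the base::DiscardableSharedMemory API visible in the diff (CreateAndMap, Close, Lock/Unlock, Unmap).

#include "base/logging.h"
#include "base/memory/discardable_shared_memory.h"

void InProcessUsageSketch() {
  const size_t kDataSize = 1024;

  base::DiscardableSharedMemory memory;
  CHECK(memory.CreateAndMap(kDataSize));

  // Close the underlying file descriptor right away; the existing mapping
  // stays valid, so this in-process segment no longer holds an open FD.
  memory.Close();

  // Memory is created in a locked state; unlock it when not in use.
  memory.Unlock(0, 0);

  // Later, lock the segment again before touching it.
  if (memory.Lock(0, 0) == base::DiscardableSharedMemory::SUCCESS) {
    // ... use up to memory.mapped_size() bytes of discardable memory ...
    memory.Unlock(0, 0);
  }

  // Drop the mapping once the segment is no longer needed.
  memory.Unmap();
}

The Close test added below exercises essentially this sequence.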
@@ -157,6 +157,14 @@ bool DiscardableSharedMemory::Map(size_t size) {
   return true;
 }
 
+bool DiscardableSharedMemory::Unmap() {
+  if (!shared_memory_.Unmap())
+    return false;
+
+  mapped_size_ = 0;
+  return true;
+}
+
 DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
     size_t offset, size_t length) {
   DCHECK_EQ(AlignToPageSize(offset), offset);
@@ -212,11 +220,12 @@ DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
 #if defined(OS_ANDROID)
   SharedMemoryHandle handle = shared_memory_.handle();
-  DCHECK(SharedMemory::IsHandleValid(handle));
-  if (ashmem_pin_region(
-          handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
-    return PURGED;
+  if (SharedMemory::IsHandleValid(handle)) {
+    if (ashmem_pin_region(
+            handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+      return PURGED;
+    }
   }
 #endif
 
   return SUCCESS;
@@ -237,11 +246,12 @@ void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
 #if defined(OS_ANDROID)
   SharedMemoryHandle handle = shared_memory_.handle();
-  DCHECK(SharedMemory::IsHandleValid(handle));
-  if (ashmem_unpin_region(
-          handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
-    DPLOG(ERROR) << "ashmem_unpin_region() failed";
+  if (SharedMemory::IsHandleValid(handle)) {
+    if (ashmem_unpin_region(
+            handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
+      DPLOG(ERROR) << "ashmem_unpin_region() failed";
+    }
   }
 #endif
 
   size_t start = offset / base::GetPageSize();
@@ -336,9 +346,7 @@ bool DiscardableSharedMemory::IsMemoryResident() const {
 }
 
 void DiscardableSharedMemory::Close() {
-  shared_memory_.Unmap();
   shared_memory_.Close();
-  mapped_size_ = 0;
 }
 
 #if defined(DISCARDABLE_SHARED_MEMORY_SHRINKING)
...
@@ -48,6 +48,11 @@ class BASE_EXPORT DiscardableSharedMemory {
   // Returns true on success, false otherwise.
   bool Map(size_t size);
 
+  // Unmaps the discardable shared memory from the caller's address space.
+  // Returns true if successful; returns false on error or if the memory is
+  // not mapped.
+  bool Unmap();
+
   // The actual size of the mapped memory (may be larger than requested).
   size_t mapped_size() const { return mapped_size_; }
...
@@ -306,11 +306,34 @@ TEST(DiscardableSharedMemoryTest, MappedSize) {
   EXPECT_LE(kDataSize, memory.mapped_size());
 
-  // Mapped size should be 0 after memory segment has been closed.
-  memory.Close();
+  // Mapped size should be 0 after memory segment has been unmapped.
+  rv = memory.Unmap();
+  EXPECT_TRUE(rv);
   EXPECT_EQ(0u, memory.mapped_size());
 }
 
+TEST(DiscardableSharedMemoryTest, Close) {
+  const uint32 kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Mapped size should be unchanged after memory segment has been closed.
+  memory.Close();
+  EXPECT_LE(kDataSize, memory.mapped_size());
+
+  // Memory is initially locked. Unlock it.
+  memory.SetNow(Time::FromDoubleT(1));
+  memory.Unlock(0, 0);
+
+  // Lock and unlock memory.
+  auto lock_rv = memory.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  memory.SetNow(Time::FromDoubleT(2));
+  memory.Unlock(0, 0);
+}
+
 #if defined(DISCARDABLE_SHARED_MEMORY_SHRINKING)
 TEST(DiscardableSharedMemoryTest, Shrink) {
   const uint32 kDataSize = 1024;
...
@@ -132,6 +132,8 @@ HostDiscardableSharedMemoryManager::AllocateLockedDiscardableMemory(
   scoped_ptr<base::DiscardableSharedMemory> memory(
       new base::DiscardableSharedMemory(handle));
   CHECK(memory->Map(size));
+  // Close file descriptor to avoid running out.
+  memory->Close();
   return make_scoped_ptr(new DiscardableMemoryImpl(
       memory.Pass(),
       base::Bind(
@@ -246,6 +248,11 @@ void HostDiscardableSharedMemoryManager::AllocateLockedDiscardableSharedMemory(
   bytes_allocated_ = checked_bytes_allocated.ValueOrDie();
   BytesAllocatedChanged(bytes_allocated_);
 
+#if !defined(DISCARDABLE_SHARED_MEMORY_SHRINKING)
+  // Close file descriptor to avoid running out.
+  memory->Close();
+#endif
+
   scoped_refptr<MemorySegment> segment(new MemorySegment(memory.Pass()));
   process_segments[id] = segment.get();
   segments_.push_back(segment.get());
@@ -374,6 +381,7 @@ void HostDiscardableSharedMemoryManager::ReleaseMemory(
   // Note: We intentionally leave the segment in the |segments| vector to
   // avoid reconstructing the heap. The element will be removed from the heap
   // when its last usage time is older than all other segments.
+  memory->Unmap();
   memory->Close();
 }
...