Commit cdd8661f authored by Alexandr Ilin's avatar Alexandr Ilin Committed by Commit Bot

base: Introduce the new Shared Memory API

Design doc (public): https://goo.gl/HmBYy6

This patch proposes the new API for the Shared Memory based on two
concepts, a shared memory region and a shared memory mapping. The
region controls the platform-specific handle and mediates sharing and
access control. The mapping is just a reference to the memory bytes
which knows how to unmap itself. Ownership is per-instance: neither a
region nor a mapping may be copied, and resources are freed when the
instance dies. This is accomplished through move-only semantics.

The goal of the new API is to provide a correct cross-platform
implementation that makes the sharing mode explicit via the C++ type
system.

Thus, this CL introduces two classes for shared memory mappings:
- WritableSharedMemoryMapping owns and provides access to writable
shared memory
- ReadOnlySharedMemoryMapping for read-only shared memory

and three classes representing shared memory regions:
- UnsafeSharedMemoryRegion allows creating
WritableSharedMemoryMapping instances and cannot be used for making
read-only mappings
- ReadOnlySharedMemoryRegion, on the contrary, can produce only
ReadOnlySharedMemoryMapping instances and it also creates one writable
mapping in the caller process address space
- WritableSharedMemoryRegion allows creating writable
mappings and can be eventually converted to ReadOnlySharedMemoryRegion.
The important limitation in comparison with UnsafeSharedMemoryRegion
is that the WritableSharedMemoryRegion cannot be duplicated so in
order to pass the writable region to another process, the ownership of
the region must be moved/transferred to this process.

Bug: 795291
Change-Id: I7d1d0a24932145cc0f449b01ceb8cafef9b7e407
Reviewed-on: https://chromium-review.googlesource.com/893458
Commit-Queue: Alexandr Ilin <alexilin@chromium.org>
Reviewed-by: default avatarMatthew Cary <mattcary@chromium.org>
Reviewed-by: default avatarDaniel Cheng <dcheng@chromium.org>
Reviewed-by: default avatarErik Chen <erikchen@chromium.org>
Reviewed-by: default avatarRobert Sesek <rsesek@chromium.org>
Reviewed-by: default avatarKen Rockot <rockot@chromium.org>
Cr-Commit-Position: refs/heads/master@{#545521}
parent 82c618ca
...@@ -513,6 +513,8 @@ jumbo_component("base") { ...@@ -513,6 +513,8 @@ jumbo_component("base") {
"memory/memory_pressure_monitor_mac.h", "memory/memory_pressure_monitor_mac.h",
"memory/memory_pressure_monitor_win.cc", "memory/memory_pressure_monitor_win.cc",
"memory/memory_pressure_monitor_win.h", "memory/memory_pressure_monitor_win.h",
"memory/platform_shared_memory_region.cc",
"memory/platform_shared_memory_region.h",
"memory/protected_memory.cc", "memory/protected_memory.cc",
"memory/protected_memory.h", "memory/protected_memory.h",
"memory/protected_memory_cfi.h", "memory/protected_memory_cfi.h",
...@@ -520,6 +522,8 @@ jumbo_component("base") { ...@@ -520,6 +522,8 @@ jumbo_component("base") {
"memory/protected_memory_win.cc", "memory/protected_memory_win.cc",
"memory/ptr_util.h", "memory/ptr_util.h",
"memory/raw_scoped_refptr_mismatch_checker.h", "memory/raw_scoped_refptr_mismatch_checker.h",
"memory/read_only_shared_memory_region.cc",
"memory/read_only_shared_memory_region.h",
"memory/ref_counted.cc", "memory/ref_counted.cc",
"memory/ref_counted.h", "memory/ref_counted.h",
"memory/ref_counted_delete_on_sequence.h", "memory/ref_counted_delete_on_sequence.h",
...@@ -532,11 +536,17 @@ jumbo_component("base") { ...@@ -532,11 +536,17 @@ jumbo_component("base") {
"memory/shared_memory_handle.h", "memory/shared_memory_handle.h",
"memory/shared_memory_helper.cc", "memory/shared_memory_helper.cc",
"memory/shared_memory_helper.h", "memory/shared_memory_helper.h",
"memory/shared_memory_mapping.cc",
"memory/shared_memory_mapping.h",
"memory/shared_memory_tracker.cc", "memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h", "memory/shared_memory_tracker.h",
"memory/singleton.h", "memory/singleton.h",
"memory/unsafe_shared_memory_region.cc",
"memory/unsafe_shared_memory_region.h",
"memory/weak_ptr.cc", "memory/weak_ptr.cc",
"memory/weak_ptr.h", "memory/weak_ptr.h",
"memory/writable_shared_memory_region.cc",
"memory/writable_shared_memory_region.h",
"message_loop/incoming_task_queue.cc", "message_loop/incoming_task_queue.cc",
"message_loop/incoming_task_queue.h", "message_loop/incoming_task_queue.h",
"message_loop/message_loop.cc", "message_loop/message_loop.cc",
...@@ -1231,6 +1241,7 @@ jumbo_component("base") { ...@@ -1231,6 +1241,7 @@ jumbo_component("base") {
if (is_android) { if (is_android) {
sources -= [ "debug/stack_trace_posix.cc" ] sources -= [ "debug/stack_trace_posix.cc" ]
sources += [ sources += [
"memory/platform_shared_memory_region_android.cc",
"memory/shared_memory_android.cc", "memory/shared_memory_android.cc",
"memory/shared_memory_handle_android.cc", "memory/shared_memory_handle_android.cc",
"time/time_android.cc", "time/time_android.cc",
...@@ -1298,6 +1309,7 @@ jumbo_component("base") { ...@@ -1298,6 +1309,7 @@ jumbo_component("base") {
"fuchsia/fuchsia_logging.cc", "fuchsia/fuchsia_logging.cc",
"fuchsia/fuchsia_logging.h", "fuchsia/fuchsia_logging.h",
"fuchsia/scoped_zx_handle.h", "fuchsia/scoped_zx_handle.h",
"memory/platform_shared_memory_region_fuchsia.cc",
"memory/shared_memory_fuchsia.cc", "memory/shared_memory_fuchsia.cc",
"memory/shared_memory_handle_fuchsia.cc", "memory/shared_memory_handle_fuchsia.cc",
"message_loop/message_pump_fuchsia.cc", "message_loop/message_pump_fuchsia.cc",
...@@ -1432,6 +1444,7 @@ jumbo_component("base") { ...@@ -1432,6 +1444,7 @@ jumbo_component("base") {
# Windows. # Windows.
if (is_win) { if (is_win) {
sources += [ sources += [
"memory/platform_shared_memory_region_win.cc",
"memory/shared_memory_handle_win.cc", "memory/shared_memory_handle_win.cc",
"memory/shared_memory_win.cc", "memory/shared_memory_win.cc",
"power_monitor/power_monitor_device_source_win.cc", "power_monitor/power_monitor_device_source_win.cc",
...@@ -1552,6 +1565,7 @@ jumbo_component("base") { ...@@ -1552,6 +1565,7 @@ jumbo_component("base") {
sources -= [ "profiler/native_stack_sampler_posix.cc" ] sources -= [ "profiler/native_stack_sampler_posix.cc" ]
sources += [ sources += [
"mac/scoped_typeref.h", "mac/scoped_typeref.h",
"memory/platform_shared_memory_region_mac.cc",
"memory/shared_memory_handle_mac.cc", "memory/shared_memory_handle_mac.cc",
"memory/shared_memory_mac.cc", "memory/shared_memory_mac.cc",
"power_monitor/power_monitor_device_source_mac.mm", "power_monitor/power_monitor_device_source_mac.mm",
...@@ -1704,7 +1718,10 @@ jumbo_component("base") { ...@@ -1704,7 +1718,10 @@ jumbo_component("base") {
# Android, Fuchsia, and MacOS have their own custom shared memory handle # Android, Fuchsia, and MacOS have their own custom shared memory handle
# implementations. e.g. due to supporting both POSIX and native handles. # implementations. e.g. due to supporting both POSIX and native handles.
if (is_posix && !is_android && !is_fuchsia && !is_mac) { if (is_posix && !is_android && !is_fuchsia && !is_mac) {
sources += [ "memory/shared_memory_handle_posix.cc" ] sources += [
"memory/platform_shared_memory_region_posix.cc",
"memory/shared_memory_handle_posix.cc",
]
} }
if (is_posix && !is_fuchsia && !is_mac && !is_nacl) { if (is_posix && !is_fuchsia && !is_mac && !is_nacl) {
...@@ -2190,11 +2207,13 @@ test("base_unittests") { ...@@ -2190,11 +2207,13 @@ test("base_unittests") {
"memory/memory_pressure_monitor_mac_unittest.cc", "memory/memory_pressure_monitor_mac_unittest.cc",
"memory/memory_pressure_monitor_unittest.cc", "memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc", "memory/memory_pressure_monitor_win_unittest.cc",
"memory/platform_shared_memory_region_unittest.cc",
"memory/protected_memory_unittest.cc", "memory/protected_memory_unittest.cc",
"memory/ptr_util_unittest.cc", "memory/ptr_util_unittest.cc",
"memory/ref_counted_memory_unittest.cc", "memory/ref_counted_memory_unittest.cc",
"memory/ref_counted_unittest.cc", "memory/ref_counted_unittest.cc",
"memory/shared_memory_mac_unittest.cc", "memory/shared_memory_mac_unittest.cc",
"memory/shared_memory_region_unittest.cc",
"memory/shared_memory_unittest.cc", "memory/shared_memory_unittest.cc",
"memory/shared_memory_win_unittest.cc", "memory/shared_memory_win_unittest.cc",
"memory/singleton_unittest.cc", "memory/singleton_unittest.cc",
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
namespace subtle {
// static
// Allocates a new shared memory region in kWritable mode; thin wrapper over
// the platform-specific Create().
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
    size_t size) {
  return Create(Mode::kWritable, size);
}

// static
// Allocates a new shared memory region in kUnsafe mode; thin wrapper over
// the platform-specific Create().
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
    size_t size) {
  return Create(Mode::kUnsafe, size);
}

// A default-constructed instance wraps no platform handle and is invalid.
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;

// Move operations transfer ownership of the platform handle; copying is
// disallowed (see DISALLOW_COPY_AND_ASSIGN in the header).
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
    PlatformSharedMemoryRegion&& other) = default;
PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
    PlatformSharedMemoryRegion&& other) = default;
PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;

// Transfers ownership of the platform handle to the caller. Afterwards this
// instance no longer owns a valid handle (its mode/size/guid are unchanged).
PlatformSharedMemoryRegion::ScopedPlatformHandle
PlatformSharedMemoryRegion::PassPlatformHandle() {
  return std::move(handle_);
}
// static
// Intended to verify that the native access rights carried by |handle| match
// |mode| (e.g. a kReadOnly handle must not be writable). Currently a stub
// that accepts every handle on every platform.
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
    PlatformHandle handle,
    Mode mode,
    size_t size) {
  // TODO(https://crbug.com/825177): implement this in platform-specific way.
  return true;
}
} // namespace subtle
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
#include <utility>
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach.h>
#include "base/mac/scoped_mach_port.h"
#elif defined(OS_FUCHSIA)
#include "base/fuchsia/scoped_zx_handle.h"
#elif defined(OS_WIN)
#include "base/win/scoped_handle.h"
#include "base/win/windows_types.h"
#elif defined(OS_POSIX)
#include <sys/types.h>
#include "base/file_descriptor_posix.h"
#include "base/files/scoped_file.h"
#endif
namespace base {
namespace subtle {
#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
    !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
// Helper structs to keep two descriptors on POSIX. It's needed to support
// ConvertToReadOnly().

// Non-owning view of the descriptor pair. |readonly_fd| is only expected to
// be valid for writable regions (see Take() in the POSIX implementation).
struct BASE_EXPORT FDPair {
  int fd;
  int readonly_fd;
};

// Owning counterpart of FDPair: both descriptors are closed on destruction.
// Move-only — only move operations are declared.
struct BASE_EXPORT ScopedFDPair {
  ScopedFDPair();
  ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
  ScopedFDPair(ScopedFDPair&&);
  ScopedFDPair& operator=(ScopedFDPair&&);
  ~ScopedFDPair();

  // Returns a non-owning view of both descriptors.
  FDPair get() const;

  ScopedFD fd;
  ScopedFD readonly_fd;
};
#endif
// Implementation class for shared memory regions.
//
// This class does the following:
//
// - Wraps and owns a shared memory region platform handle.
// - Provides a way to allocate a new region of platform shared memory of given
//   size.
// - Provides a way to create mapping of the region in the current process'
//   address space, under special access-control constraints (see Mode).
// - Provides methods to help transferring the handle across process boundaries.
// - Holds a 128-bit unique identifier used to uniquely identify the same
//   kernel region resource across processes (used for memory tracking).
// - Has a method to retrieve the region's size in bytes.
//
// IMPORTANT NOTE: Users should never use this directly, but
// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
// UnsafeSharedMemoryRegion since this is an implementation class.
class BASE_EXPORT PlatformSharedMemoryRegion {
 public:
  // Permission mode of the platform handle. Each mode corresponds to one of
  // the typed shared memory classes:
  //
  // * ReadOnlySharedMemoryRegion: A region that can only create read-only
  //   mappings.
  //
  // * WritableSharedMemoryRegion: A region that can only create writable
  //   mappings. The region can be demoted to ReadOnlySharedMemoryRegion
  //   without the possibility of promoting back to writable.
  //
  // * UnsafeSharedMemoryRegion: A region that can only create writable
  //   mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
  enum class Mode {
    kReadOnly,  // ReadOnlySharedMemoryRegion
    kWritable,  // WritableSharedMemoryRegion
    kUnsafe,    // UnsafeSharedMemoryRegion
  };

  // Platform-specific shared memory type used by this class.
#if defined(OS_MACOSX) && !defined(OS_IOS)
  using PlatformHandle = mach_port_t;
  using ScopedPlatformHandle = mac::ScopedMachSendRight;
#elif defined(OS_FUCHSIA)
  using PlatformHandle = zx_handle_t;
  using ScopedPlatformHandle = ScopedZxHandle;
#elif defined(OS_WIN)
  using PlatformHandle = HANDLE;
  using ScopedPlatformHandle = win::ScopedHandle;
#elif defined(OS_ANDROID)
  using PlatformHandle = int;
  using ScopedPlatformHandle = ScopedFD;
#else
  // Other POSIX platforms keep a pair of descriptors to support
  // ConvertToReadOnly() (see FDPair above).
  using PlatformHandle = FDPair;
  using ScopedPlatformHandle = ScopedFDPair;
#endif

  // The minimum alignment in bytes that any mapped address produced by Map()
  // and MapAt() is guaranteed to have.
  enum { kMapMinimumAlignment = 32 };

  // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
  // Creating in kReadOnly mode isn't supported because then there will be no
  // way to modify memory content.
  static PlatformSharedMemoryRegion CreateWritable(size_t size);
  static PlatformSharedMemoryRegion CreateUnsafe(size_t size);

  // Returns a new PlatformSharedMemoryRegion that takes ownership of the
  // |handle|. All parameters must be taken from another valid
  // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
  // actual region size as allocated by the kernel.
  // Closes the |handle| and returns an invalid instance if passed parameters
  // are invalid.
  static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
                                         Mode mode,
                                         size_t size,
                                         const UnguessableToken& guid);

  // Default constructor initializes an invalid instance, i.e. an instance that
  // doesn't wrap any valid platform handle.
  PlatformSharedMemoryRegion();

  // Move operations are allowed.
  PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
  PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);

  // Destructor closes the platform handle. Does nothing if the handle is
  // invalid.
  ~PlatformSharedMemoryRegion();

  // Passes ownership of the platform handle to the caller. The current
  // instance becomes invalid. It's the responsibility of the caller to close
  // the handle.
  ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;

  // Returns the platform handle. The current instance keeps ownership of this
  // handle.
  PlatformHandle GetPlatformHandle() const;

  // Whether the platform handle is valid.
  bool IsValid() const;

  // Duplicates the platform handle and creates a new
  // PlatformSharedMemoryRegion with the same |mode_|, |size_| and |guid_| that
  // owns this handle. Returns invalid region on failure, the current instance
  // remains valid. Can be called only in kReadOnly and kUnsafe modes,
  // CHECK-fails if is called in kWritable mode.
  PlatformSharedMemoryRegion Duplicate();

  // Converts the region to read-only. Returns whether the operation succeeded.
  // Makes the current instance invalid on failure. Can be called only in
  // kWritable mode, all other modes will CHECK-fail. The object will have
  // kReadOnly mode after this call on success.
  bool ConvertToReadOnly();
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // Same as above, but |mapped_addr| is used as a hint to avoid additional
  // mapping of the memory object.
  // |mapped_addr| must be mapped location of |memory_object_|. If the location
  // is unknown, |mapped_addr| should be |nullptr|.
  bool ConvertToReadOnly(void* mapped_addr);
#endif  // defined(OS_MACOSX) && !defined(OS_IOS)

  // Maps |size| bytes of the shared memory region starting with the given
  // |offset| into the caller's address space. |offset| must be aligned to
  // value of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes
  // are out of the region limits.
  // Returns true and sets |memory| and |mapped_size| on success, returns false
  // and leaves output parameters in unspecified state otherwise. The mapped
  // address is guaranteed to have an alignment of at least
  // |kMapMinimumAlignment|.
  bool MapAt(off_t offset, size_t size, void** memory, size_t* mapped_size);

  const UnguessableToken& GetGUID() const { return guid_; }

  size_t GetSize() const { return size_; }

  Mode GetMode() const { return mode_; }

 private:
  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
                           CreateReadOnlyRegionDeathTest);
  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
                           CheckPlatformHandlePermissionsCorrespondToMode);

  // Allocates a new region; implemented per-platform. Backs CreateWritable()
  // and CreateUnsafe().
  static PlatformSharedMemoryRegion Create(Mode mode, size_t size);

  // Verifies that |handle|'s native permissions match |mode|; see the .cc
  // files for the per-platform status of this check.
  static bool CheckPlatformHandlePermissionsCorrespondToMode(
      PlatformHandle handle,
      Mode mode,
      size_t size);

  // Private: instances are created only through the factory functions above,
  // which validate the arguments first.
  PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
                             Mode mode,
                             size_t size,
                             const UnguessableToken& guid);

  ScopedPlatformHandle handle_;  // Owned platform handle; invalid by default.
  Mode mode_ = Mode::kReadOnly;
  size_t size_ = 0;
  UnguessableToken guid_;  // Cross-process identifier for memory tracking.

  DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
};
} // namespace subtle
} // namespace base
#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"

#include <sys/mman.h>

#include <limits>

#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/checked_math.h"
#include "base/posix/eintr_wrapper.h"
#include "third_party/ashmem/ashmem.h"
namespace base {
namespace subtle {
// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
// will automatically pin the region. We never explicitly call pin/unpin. When
// all the file descriptors from different processes associated with the region
// are closed, the memory buffer will go away.
namespace {
// Returns the current ashmem protection mask for |fd|, or -1 (after logging)
// when ashmem_get_prot_region() reports an error.
static int GetAshmemRegionProtectionMask(int fd) {
  const int protection_mask = ashmem_get_prot_region(fd);
  if (protection_mask >= 0)
    return protection_mask;
  DPLOG(ERROR) << "ashmem_get_prot_region failed";
  return -1;
}
} // namespace
// static
// Adopts an existing ashmem descriptor. Returns an invalid region when the
// descriptor or size is unusable; CHECK-fails if the handle's permissions do
// not correspond to |mode|.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
    ScopedFD fd,
    Mode mode,
    size_t size,
    const UnguessableToken& guid) {
  const size_t kMaxSize = static_cast<size_t>(std::numeric_limits<int>::max());
  if (!fd.is_valid() || size == 0 || size > kMaxSize)
    return {};

  CHECK(CheckPlatformHandlePermissionsCorrespondToMode(fd.get(), mode, size));

  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
}
// Returns the ashmem file descriptor without transferring ownership.
int PlatformSharedMemoryRegion::GetPlatformHandle() const {
  return handle_.get();
}

// A region is valid iff it owns an open file descriptor.
bool PlatformSharedMemoryRegion::IsValid() const {
  return handle_.is_valid();
}
// Produces a second region referring to the same ashmem buffer by dup()ing
// the descriptor. CHECK-fails in kWritable mode, which must not be duplicated
// (see the class-level documentation in the header).
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() {
  if (!IsValid())
    return {};

  CHECK_NE(mode_, Mode::kWritable)
      << "Duplicating a writable shared memory region is prohibited";

  ScopedFD cloned_fd(HANDLE_EINTR(dup(handle_.get())));
  if (!cloned_fd.is_valid()) {
    DPLOG(ERROR) << "dup(" << handle_.get() << ") failed";
    return {};
  }

  return PlatformSharedMemoryRegion(std::move(cloned_fd), mode_, size_, guid_);
}
// Demotes a kWritable region to kReadOnly by clearing PROT_WRITE on the
// ashmem region. On any failure the descriptor is closed (via |handle_copy|)
// and this instance is left invalid, matching the header contract.
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
  if (!IsValid())
    return false;

  CHECK_EQ(mode_, Mode::kWritable)
      << "Only writable shared memory region can be converted to read-only";

  // Take ownership out of |handle_| so that every early-return path below
  // leaves this instance invalid.
  ScopedFD handle_copy(handle_.release());

  int prot = GetAshmemRegionProtectionMask(handle_copy.get());
  if (prot < 0)
    return false;

  // Drop the write bit from the region's protection mask.
  prot &= ~PROT_WRITE;
  int ret = ashmem_set_prot_region(handle_copy.get(), prot);
  if (ret != 0) {
    DPLOG(ERROR) << "ashmem_set_prot_region failed";
    return false;
  }

  handle_ = std::move(handle_copy);
  mode_ = Mode::kReadOnly;
  return true;
}
// Maps [offset, offset + size) of the region via mmap(). See the header for
// the full contract (alignment guarantee, output-parameter semantics).
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
                                       size_t size,
                                       void** memory,
                                       size_t* mapped_size) {
  if (!IsValid())
    return false;

  // Reject ranges whose end overflows or extends past the region's size.
  size_t end_byte;
  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
    return false;
  }

  bool write_allowed = mode_ != Mode::kReadOnly;
  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
                 MAP_SHARED, handle_.get(), offset);

  // mmap() signals failure with MAP_FAILED ((void*)-1); the null check is a
  // defensive extra.
  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
  if (!mmap_succeeded) {
    DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
    return false;
  }

  *mapped_size = size;
  DCHECK_EQ(0U,
            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
  return true;
}
// static
// Allocates a new ashmem-backed region of |size| bytes with read+write
// protection. Returns an invalid region when |size| is 0, too large, or when
// any ashmem call fails; CHECK-fails for kReadOnly mode (a region created
// read-only could never be filled with data).
//
// Fix: removed a leftover debug statement (`LOG(ERROR) << "Before CHECK";`)
// that logged unconditionally at ERROR severity on every allocation.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
                                                              size_t size) {
  if (size == 0)
    return {};

  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
    return {};

  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
                                     "lead to this region being non-modifiable";

  // The GUID doubles as the ashmem region's name for memory tracking.
  UnguessableToken guid = UnguessableToken::Create();

  ScopedFD fd(ashmem_create_region(
      SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), size));
  if (!fd.is_valid()) {
    DPLOG(ERROR) << "ashmem_create_region failed";
    return {};
  }

  int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
  if (err < 0) {
    DPLOG(ERROR) << "ashmem_set_prot_region failed";
    return {};
  }

  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
}
// Private constructor: callers (the factory functions above) have already
// validated |fd|, |size| and |mode|.
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
    ScopedFD fd,
    Mode mode,
    size_t size,
    const UnguessableToken& guid)
    : handle_(std::move(fd)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#include "base/bits.h"
#include "base/numerics/checked_math.h"
#include "base/process/process_metrics.h"
namespace base {
namespace subtle {
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
ScopedZxHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(
CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
// Returns the VMO handle without transferring ownership.
zx_handle_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
  return handle_.get();
}

// A region is valid iff it owns a live zircon handle.
bool PlatformSharedMemoryRegion::IsValid() const {
  return handle_.is_valid();
}
// Duplicates the VMO handle with identical rights. CHECK-fails in kWritable
// mode, which must not be duplicated (see the header documentation).
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() {
  if (!IsValid())
    return {};

  CHECK_NE(mode_, Mode::kWritable)
      << "Duplicating a writable shared memory region is prohibited";

  ScopedZxHandle duped_handle;
  zx_status_t status = zx_handle_duplicate(handle_.get(), ZX_RIGHT_SAME_RIGHTS,
                                           duped_handle.receive());
  if (status != ZX_OK) {
    DLOG(ERROR) << "zx_handle_duplicate failed: "
                << zx_status_get_string(status);
    return {};
  }

  return PlatformSharedMemoryRegion(std::move(duped_handle), mode_, size_,
                                    guid_);
}
// Demotes a kWritable region to kReadOnly by replacing the handle with one
// whose WRITE/EXECUTE/SET_PROPERTY rights are stripped. On failure the
// instance is left invalid (the handle was already released from |handle_|).
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
  if (!IsValid())
    return false;

  CHECK_EQ(mode_, Mode::kWritable)
      << "Only writable shared memory region can be converted to read-only";

  // Take ownership out of |handle_| so that failure paths below leave this
  // instance invalid.
  ScopedZxHandle old_handle(handle_.release());
  ScopedZxHandle new_handle;
  const int kNoWriteOrExec =
      ZX_DEFAULT_VMO_RIGHTS &
      ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
  zx_status_t status =
      zx_handle_replace(old_handle.get(), kNoWriteOrExec, new_handle.receive());
  if (status != ZX_OK) {
    DLOG(ERROR) << "zx_handle_replace failed: " << zx_status_get_string(status);
    return false;
  }
  // On success the kernel has already invalidated |old_handle|, so drop it
  // without attempting to close it again.
  ignore_result(old_handle.release());

  handle_ = std::move(new_handle);
  mode_ = Mode::kReadOnly;
  return true;
}
// Maps [offset, offset + size) of the VMO into the root VMAR. See the header
// for the full contract (alignment guarantee, output-parameter semantics).
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
                                       size_t size,
                                       void** memory,
                                       size_t* mapped_size) {
  if (!IsValid())
    return false;

  // Reject ranges whose end overflows or extends past the region's size.
  size_t end_byte;
  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
    return false;
  }

  bool write_allowed = mode_ != Mode::kReadOnly;
  uintptr_t addr;
  zx_status_t status = zx_vmar_map(
      zx_vmar_root_self(), 0, handle_.get(), offset, size,
      ZX_VM_FLAG_PERM_READ | (write_allowed ? ZX_VM_FLAG_PERM_WRITE : 0),
      &addr);
  if (status != ZX_OK) {
    DLOG(ERROR) << "zx_vmar_map failed: " << zx_status_get_string(status);
    return false;
  }

  *memory = reinterpret_cast<void*>(addr);
  *mapped_size = size;
  DCHECK_EQ(0U,
            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
  return true;
}
// static
// Allocates a new VMO of |size| bytes rounded up to the page size, then
// strips the EXECUTE right from the handle. Returns an invalid region when
// |size| is 0, too large, or when a syscall fails; CHECK-fails for kReadOnly
// mode.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
                                                              size_t size) {
  if (size == 0)
    return {};

  size_t rounded_size = bits::Align(size, GetPageSize());
  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
    return {};

  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
                                     "lead to this region being non-modifiable";

  ScopedZxHandle vmo;
  zx_status_t status = zx_vmo_create(rounded_size, 0, vmo.receive());
  if (status != ZX_OK) {
    DLOG(ERROR) << "zx_vmo_create failed: " << zx_status_get_string(status);
    return {};
  }

  const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
  // |vmo| is moved out and then refilled through receive(): the moved-from
  // scoped handle is empty, so zx_handle_replace() stores the replacement
  // handle back into |vmo|.
  ScopedZxHandle old_vmo(std::move(vmo));
  status = zx_handle_replace(old_vmo.get(), kNoExecFlags, vmo.receive());
  if (status != ZX_OK) {
    DLOG(ERROR) << "zx_handle_replace failed: " << zx_status_get_string(status);
    return {};
  }
  // The kernel invalidated |old_vmo| in the replace call; don't close it.
  ignore_result(old_vmo.release());

  return PlatformSharedMemoryRegion(std::move(vmo), mode, rounded_size,
                                    UnguessableToken::Create());
}
// Private constructor: callers (the factory functions above) have already
// validated |handle|, |size| and |mode|.
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
    ScopedZxHandle handle,
    Mode mode,
    size_t size,
    const UnguessableToken& guid)
    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <mach/mach_vm.h>
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_vm.h"
#include "base/numerics/checked_math.h"
#include "build/build_config.h"
#if defined(OS_IOS)
#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
#endif
namespace base {
namespace subtle {
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
mac::ScopedMachSendRight handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(
CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
// Returns the Mach send right without transferring ownership.
mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
  return handle_.get();
}

// A region is valid iff it owns a live Mach send right.
bool PlatformSharedMemoryRegion::IsValid() const {
  return handle_.is_valid();
}
// Duplicates the region by adding one send-right reference to the same Mach
// port; the returned region's scoped right adopts that extra reference while
// |handle_| keeps the original. CHECK-fails in kWritable mode, which must not
// be duplicated (see the header documentation).
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() {
  if (!IsValid())
    return {};

  CHECK_NE(mode_, Mode::kWritable)
      << "Duplicating a writable shared memory region is prohibited";

  // Increment the ref count.
  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
                                        MACH_PORT_RIGHT_SEND, 1);
  if (kr != KERN_SUCCESS) {
    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
    return {};
  }

  return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
                                    mode_, size_, guid_);
}
// Convenience overload: no known mapped address, so pass nullptr and let the
// hinted overload create (and clean up) a temporary mapping.
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
  return ConvertToReadOnly(nullptr);
}
// Demotes a kWritable region to kReadOnly by creating a fresh read-only
// memory-object named right for the same pages. |mapped_addr| is a hint: when
// non-null it must be an existing mapping of the region, sparing the extra
// temporary mapping made here. On failure the instance is left invalid (the
// original right was already released into |handle_copy|).
bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
  if (!IsValid())
    return false;

  CHECK_EQ(mode_, Mode::kWritable)
      << "Only writable shared memory region can be converted to read-only";

  // Take ownership out of |handle_| so that failure paths below leave this
  // instance invalid.
  mac::ScopedMachSendRight handle_copy(handle_.release());

  void* temp_addr = mapped_addr;
  mac::ScopedMachVM scoped_memory;
  if (!temp_addr) {
    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
    kern_return_t kr = mach_vm_map(
        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
        size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
        VM_PROT_READ, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS) {
      MACH_DLOG(ERROR, kr) << "mach_vm_map";
      return false;
    }
    // |scoped_memory| unmaps the temporary mapping when this scope exits.
    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
                        mach_vm_round_page(size_));
  }

  // Make new memory object. NOTE(review): |size_| is reinterpreted as a
  // memory_object_size_t* in place — assumes the two types have identical
  // size/representation on this platform.
  mac::ScopedMachSendRight named_right;
  kern_return_t kr = mach_make_memory_entry_64(
      mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size_),
      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
      named_right.receive(), MACH_PORT_NULL);
  if (kr != KERN_SUCCESS) {
    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
    return false;
  }

  handle_ = std::move(named_right);
  mode_ = Mode::kReadOnly;
  return true;
}
// Maps [offset, offset + size) of the memory object via mach_vm_map(). See
// the header for the full contract (alignment guarantee, output-parameter
// semantics).
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
                                       size_t size,
                                       void** memory,
                                       size_t* mapped_size) {
  if (!IsValid())
    return false;

  // Reject ranges whose end overflows or extends past the region's size.
  size_t end_byte;
  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
    return false;
  }

  bool write_allowed = mode_ != Mode::kReadOnly;
  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
  kern_return_t kr = mach_vm_map(
      mach_task_self(),
      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
      size,
      0,  // Alignment mask
      VM_FLAGS_ANYWHERE, handle_.get(), offset,
      FALSE,                         // Copy
      VM_PROT_READ | vm_prot_write,  // Current protection
      VM_PROT_READ | vm_prot_write,  // Maximum protection
      VM_INHERIT_NONE);
  if (kr != KERN_SUCCESS) {
    MACH_DLOG(ERROR, kr) << "mach_vm_map";
    return false;
  }

  *mapped_size = size;
  DCHECK_EQ(0U,
            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
  return true;
}
// static
// Allocates a new Mach memory object of |size| bytes with read+write
// protection. Returns an invalid region when |size| is 0, too large, or when
// the kernel call fails; CHECK-fails for kReadOnly mode. Note the region's
// recorded size is |vm_size| as updated by the kernel, not the requested
// |size|.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
                                                              size_t size) {
  if (size == 0)
    return {};

  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
    return {};

  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
                                     "lead to this region being non-modifiable";

  mach_vm_size_t vm_size = size;
  mac::ScopedMachSendRight named_right;
  kern_return_t kr = mach_make_memory_entry_64(
      mach_task_self(), &vm_size,
      0,  // Address.
      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
      named_right.receive(),
      MACH_PORT_NULL);  // Parent handle.
  if (kr != KERN_SUCCESS) {
    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
    return {};
  }

  return PlatformSharedMemoryRegion(std::move(named_right), mode, vm_size,
                                    UnguessableToken::Create());
}
// Private constructor: callers (the factory functions above) have already
// validated |handle|, |size| and |mode|.
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
    mac::ScopedMachSendRight handle,
    Mode mode,
    size_t size,
    const UnguessableToken& guid)
    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include "base/files/file_util.h"
#include "base/numerics/checked_math.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
namespace base {
namespace subtle {
namespace {
// ScopedGeneric traits that unlink() the wrapped FilePath on release.
struct ScopedPathUnlinkerTraits {
  static const FilePath* InvalidValue() { return nullptr; }

  static void Free(const FilePath* path) {
    // Best-effort: a failed unlink only produces a warning.
    if (unlink(path->value().c_str()))
      PLOG(WARNING) << "unlink";
  }
};

// Unlinks the FilePath when the object is destroyed.
using ScopedPathUnlinker =
    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
} // namespace
ScopedFDPair::ScopedFDPair() = default;

ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;

ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;

ScopedFDPair::~ScopedFDPair() = default;

// |in_readonly_fd| is only expected to be valid for writable regions; see
// PlatformSharedMemoryRegion::Take() below.
ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
    : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}

// Returns non-owning views of both descriptors (ownership stays here).
FDPair ScopedFDPair::get() const {
  return {fd.get(), readonly_fd.get()};
}
// static
// Wraps an already-created platform handle pair into a region object after
// validating that |handle| is consistent with the requested |mode| and that
// |size| is usable. Returns an invalid region on any validation failure;
// CHECK-fails if the descriptors' access rights contradict |mode|.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
    ScopedFDPair handle,
    Mode mode,
    size_t size,
    const UnguessableToken& guid) {
  if (!handle.fd.is_valid())
    return {};

  if (size == 0)
    return {};

  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
    return {};

  CHECK(
      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));

  switch (mode) {
    case Mode::kReadOnly:
    case Mode::kUnsafe:
      if (handle.readonly_fd.is_valid()) {
        // Non-writable regions never need the second descriptor; drop it so
        // it cannot leak.
        handle.readonly_fd.reset();
        DLOG(WARNING) << "Readonly handle shouldn't be valid for a "
                         "non-writable memory region; closing";
      }
      break;
    case Mode::kWritable:
      // Writable regions must keep a read-only fd so ConvertToReadOnly()
      // can succeed later.
      if (!handle.readonly_fd.is_valid()) {
        DLOG(ERROR)
            << "Readonly handle must be valid for writable memory region";
        return {};
      }
      break;
    default:
      DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
      return {};
  }

  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
// Returns non-owning views of the underlying descriptors; the region retains
// ownership.
FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
  return handle_.get();
}
// A region is valid iff it owns a usable descriptor; a writable region must
// additionally hold the read-only descriptor that ConvertToReadOnly() needs.
bool PlatformSharedMemoryRegion::IsValid() const {
  if (!handle_.fd.is_valid())
    return false;
  if (mode_ == Mode::kWritable)
    return handle_.readonly_fd.is_valid();
  return true;
}
// Returns a new region owning a dup()'ed descriptor, or an invalid region on
// failure. Duplicating a writable region is CHECK-disallowed: writable
// regions must have their ownership transferred, never shared.
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() {
  if (!IsValid())
    return {};

  CHECK_NE(mode_, Mode::kWritable)
      << "Duplicating a writable shared memory region is prohibited";

  ScopedFD dup_handle(HANDLE_EINTR(dup(handle_.fd.get())));
  if (!dup_handle.is_valid()) {
    DPLOG(ERROR) << "dup(" << handle_.fd.get() << ") failed";
    return {};
  }

  // The readonly_fd slot stays empty: only writable regions carry one.
  return PlatformSharedMemoryRegion({std::move(dup_handle), ScopedFD()}, mode_,
                                    size_, guid_);
}
// Replaces the writable descriptor with the stashed read-only one and flips
// the mode. Only valid kWritable regions may be converted (CHECK-enforced).
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
  if (!IsValid())
    return false;

  CHECK_EQ(mode_, Mode::kWritable)
      << "Only writable shared memory region can be converted to read-only";

  // reset() closes the old writable fd; the region then holds only the
  // read-only descriptor.
  handle_.fd.reset(handle_.readonly_fd.release());
  mode_ = Mode::kReadOnly;
  return true;
}
// Maps |size| bytes of the region starting at |offset| into the caller's
// address space. Writes the base address to |memory| and the mapped length to
// |mapped_size|. Returns false for an invalid region, for a range that
// overflows or extends past the end of the region, or if mmap() fails.
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
                                       size_t size,
                                       void** memory,
                                       size_t* mapped_size) {
  if (!IsValid())
    return false;

  // Reject requests where |offset| + |size| overflows or exceeds the region.
  size_t end_byte;
  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
    return false;
  }

  bool write_allowed = mode_ != Mode::kReadOnly;
  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
                 MAP_SHARED, handle_.fd.get(), offset);

  // mmap() reports failure via MAP_FAILED, not nullptr; check both
  // defensively. (MAP_FAILED is the portable spelling of (void*)-1.)
  bool mmap_succeeded = *memory && *memory != MAP_FAILED;
  if (!mmap_succeeded) {
    DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
    return false;
  }

  *mapped_size = size;
  DCHECK_EQ(0U,
            reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
  return true;
}
// static
// Creates a region of |size| bytes backed by an unlinked temporary file.
// CHECK-fails for Mode::kReadOnly; for Mode::kWritable a second, read-only
// descriptor to the same inode is opened up front so the region can later be
// converted via ConvertToReadOnly().
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
                                                              size_t size) {
#if defined(OS_NACL)
  // Untrusted code can't create descriptors or handles.
  return {};
#else
  if (size == 0)
    return {};

  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
    return {};

  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
                                     "lead to this region being non-modifiable";

  // This function theoretically can block on the disk, but realistically
  // the temporary files we create will just go into the buffer cache
  // and be deleted before they ever make it out to disk.
  ThreadRestrictions::ScopedAllowIO allow_io;

  // We don't use shm_open() API in order to support the --disable-dev-shm-usage
  // flag.
  FilePath directory;
  if (!GetShmemTempDir(false /* executable */, &directory))
    return {};

  ScopedFD fd;
  FilePath path;
  fd.reset(CreateAndOpenFdForTemporaryFileInDir(directory, &path));

  if (!fd.is_valid()) {
    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
    // Diagnose the common cause: a temp directory we cannot write into.
    FilePath dir = path.DirName();
    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
      if (dir.value() == "/dev/shm") {
        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
                   << "/dev/shm. Try 'sudo chmod 1777 /dev/shm' to fix.";
      }
    }
    return {};
  }

  // Deleting the file prevents anyone else from mapping it in (making it
  // private), and prevents the need for cleanup (once the last fd is
  // closed, it is truly freed).
  ScopedPathUnlinker path_unlinker(&path);

  ScopedFD readonly_fd;
  if (mode == Mode::kWritable) {
    // Also open as readonly so that we can ConvertToReadOnly().
    readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
    if (!readonly_fd.is_valid()) {
      DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
      return {};
    }
  }

  // Get current size.
  struct stat stat = {};
  if (fstat(fd.get(), &stat) != 0)
    return {};
  const size_t current_size = stat.st_size;
  if (current_size != size) {
    if (HANDLE_EINTR(ftruncate(fd.get(), size)) != 0)
      return {};
  }

  if (readonly_fd.is_valid()) {
    // Defend against the path being replaced between the two opens: both
    // descriptors must refer to the same device and inode.
    struct stat readonly_stat = {};
    if (fstat(readonly_fd.get(), &readonly_stat))
      NOTREACHED();

    if (stat.st_dev != readonly_stat.st_dev ||
        stat.st_ino != readonly_stat.st_ino) {
      LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
      return {};
    }
  }

  return PlatformSharedMemoryRegion({std::move(fd), std::move(readonly_fd)},
                                    mode, size, UnguessableToken::Create());
#endif  // !defined(OS_NACL)
}

// Takes ownership of the descriptor pair; |guid| identifies this region for
// de-duplicated memory accounting across processes.
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
    ScopedFDPair handle,
    Mode mode,
    size_t size,
    const UnguessableToken& guid)
    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/process/process_metrics.h"
#include "base/sys_info.h"
#include "base/test/gtest_util.h"
#include "base/test/test_shared_memory_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach_vm.h>
#endif
namespace base {
namespace subtle {
const size_t kRegionSize = 1024;
// Test fixture. MapAt() builds a raw SharedMemoryMapping straight from the
// low-level region, and GetMemory() reaches the mapping's base address via
// the test's friendship with SharedMemoryMapping.
class PlatformSharedMemoryRegionTest : public ::testing::Test {
 public:
  SharedMemoryMapping MapAt(PlatformSharedMemoryRegion* region,
                            off_t offset,
                            size_t bytes) {
    void* memory = nullptr;
    size_t mapped_size = 0;
    if (!region->MapAt(offset, bytes, &memory, &mapped_size))
      return {};

    return SharedMemoryMapping(memory, mapped_size, region->GetGUID());
  }

  void* GetMemory(SharedMemoryMapping* mapping) {
    return mapping->raw_memory_ptr();
  }
};

// Tests that a default constructed region is invalid and produces invalid
// mappings.
TEST_F(PlatformSharedMemoryRegionTest, DefaultConstructedRegionIsInvalid) {
  PlatformSharedMemoryRegion region;
  EXPECT_FALSE(region.IsValid());

  SharedMemoryMapping mapping = MapAt(&region, 0, kRegionSize);
  EXPECT_FALSE(mapping.IsValid());

  PlatformSharedMemoryRegion duplicate = region.Duplicate();
  EXPECT_FALSE(duplicate.IsValid());

  EXPECT_FALSE(region.ConvertToReadOnly());
}

// Tests that creating a region of 0 size returns an invalid region.
TEST_F(PlatformSharedMemoryRegionTest, CreateRegionOfZeroSizeIsInvalid) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(0);
  EXPECT_FALSE(region.IsValid());

  PlatformSharedMemoryRegion region2 =
      PlatformSharedMemoryRegion::CreateUnsafe(0);
  EXPECT_FALSE(region2.IsValid());
}

// Tests that creating a region of size bigger than the integer max value
// returns an invalid region.
TEST_F(PlatformSharedMemoryRegionTest, CreateTooLargeRegionIsInvalid) {
  size_t too_large_region_size =
      static_cast<size_t>(std::numeric_limits<int>::max()) + 1;
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(too_large_region_size);
  EXPECT_FALSE(region.IsValid());

  PlatformSharedMemoryRegion region2 =
      PlatformSharedMemoryRegion::CreateUnsafe(too_large_region_size);
  EXPECT_FALSE(region2.IsValid());
}
// Tests that the platform-specific handle converted to read-only cannot be used
// to perform a writable mapping with low-level system APIs like mmap().
TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyHandleIsNotWritable) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  EXPECT_TRUE(region.ConvertToReadOnly());
  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
  // The test helper attempts a writable map using raw OS calls; it must fail.
  EXPECT_TRUE(
      CheckReadOnlyPlatformSharedMemoryRegionForTesting(std::move(region)));
}

// Tests that the PassPlatformHandle() call invalidates the region.
TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterPass) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  ignore_result(region.PassPlatformHandle());
  EXPECT_FALSE(region.IsValid());
}

// Tests that the region is invalid after move.
TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterMove) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  PlatformSharedMemoryRegion moved_region = std::move(region);
  EXPECT_FALSE(region.IsValid());
  EXPECT_TRUE(moved_region.IsValid());
}

// Tests that calling Take() with the size parameter equal to zero returns an
// invalid region.
TEST_F(PlatformSharedMemoryRegionTest, TakeRegionOfZeroSizeIsInvalid) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());

  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
      region.PassPlatformHandle(), region.GetMode(), 0, region.GetGUID());
  EXPECT_FALSE(region2.IsValid());
}

// Tests that calling Take() with the size parameter bigger than the integer max
// value returns an invalid region.
TEST_F(PlatformSharedMemoryRegionTest, TakeTooLargeRegionIsInvalid) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());

  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
      region.PassPlatformHandle(), region.GetMode(),
      static_cast<size_t>(std::numeric_limits<int>::max()) + 1,
      region.GetGUID());
  EXPECT_FALSE(region2.IsValid());
}

// Tests that mapping bytes out of the region limits fails.
TEST_F(PlatformSharedMemoryRegionTest, MapAtOutOfTheRegionLimitsTest) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  SharedMemoryMapping mapping = MapAt(&region, 0, region.GetSize() + 1);
  EXPECT_FALSE(mapping.IsValid());
}

// Tests that mapping with a size and offset causing overflow fails.
TEST_F(PlatformSharedMemoryRegionTest, MapAtWithOverflowTest) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(
          SysInfo::VMAllocationGranularity() * 2);
  ASSERT_TRUE(region.IsValid());
  size_t size = std::numeric_limits<size_t>::max();
  off_t offset = SysInfo::VMAllocationGranularity();
  // |size| + |offset| should be below the region size due to overflow but
  // mapping a region with these parameters should be invalid.
  EXPECT_LT(size + offset, region.GetSize());
  SharedMemoryMapping mapping = MapAt(&region, offset, size);
  EXPECT_FALSE(mapping.IsValid());
}
#if defined(OS_POSIX) && !defined(OS_ANDROID) && !defined(OS_FUCHSIA) && \
    !defined(OS_MACOSX)
// Tests that the second handle is closed after a conversion to read-only on
// POSIX.
TEST_F(PlatformSharedMemoryRegionTest,
       ConvertToReadOnlyInvalidatesSecondHandle) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  ASSERT_TRUE(region.ConvertToReadOnly());
  FDPair fds = region.GetPlatformHandle();
  // A negative fd indicates the read-only descriptor was released.
  EXPECT_LT(fds.readonly_fd, 0);
}
#endif

#if defined(OS_MACOSX) && !defined(OS_IOS)
// Tests that protection bits are set correctly for read-only region on MacOS.
TEST_F(PlatformSharedMemoryRegionTest, MapCurrentAndMaxProtectionSetCorrectly) {
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  ASSERT_TRUE(region.ConvertToReadOnly());
  SharedMemoryMapping ro_mapping = MapAt(&region, 0, kRegionSize);
  ASSERT_TRUE(ro_mapping.IsValid());

  vm_region_basic_info_64 basic_info;
  mach_vm_size_t dummy_size = 0;
  void* temp_addr = GetMemory(&ro_mapping);
  MachVMRegionResult result = GetBasicInfo(
      mach_task_self(), &dummy_size,
      reinterpret_cast<mach_vm_address_t*>(&temp_addr), &basic_info);
  EXPECT_EQ(result, MachVMRegionResult::Success);
  // Both current and maximum protection must be read-only, so the mapping can
  // never be upgraded to writable.
  EXPECT_EQ(basic_info.protection & VM_PROT_ALL, VM_PROT_READ);
  EXPECT_EQ(basic_info.max_protection & VM_PROT_ALL, VM_PROT_READ);
}
#endif
// Tests that it's impossible to create read-only platform shared memory region.
TEST_F(PlatformSharedMemoryRegionTest, CreateReadOnlyRegionDeathTest) {
#ifdef OFFICIAL_BUILD
  // The official build does not print the reason a CHECK failed.
  const char kErrorRegex[] = "";
#else
  const char kErrorRegex[] =
      "Creating a region in read-only mode will lead to this region being "
      "non-modifiable";
#endif
  EXPECT_DEATH_IF_SUPPORTED(
      PlatformSharedMemoryRegion::Create(
          PlatformSharedMemoryRegion::Mode::kReadOnly, kRegionSize),
      kErrorRegex);
}

// Tests that it's prohibited to duplicate a writable region.
TEST_F(PlatformSharedMemoryRegionTest, DuplicateWritableRegionDeathTest) {
#ifdef OFFICIAL_BUILD
  const char kErrorRegex[] = "";
#else
  const char kErrorRegex[] =
      "Duplicating a writable shared memory region is prohibited";
#endif
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  EXPECT_DEATH_IF_SUPPORTED(region.Duplicate(), kErrorRegex);
}

// Tests that it's prohibited to convert an unsafe region to read-only.
TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToReadOnlyDeathTest) {
#ifdef OFFICIAL_BUILD
  const char kErrorRegex[] = "";
#else
  const char kErrorRegex[] =
      "Only writable shared memory region can be converted to read-only";
#endif
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
}

// Tests that it's prohibited to convert a read-only region to read-only.
TEST_F(PlatformSharedMemoryRegionTest,
       ReadOnlyRegionConvertToReadOnlyDeathTest) {
#ifdef OFFICIAL_BUILD
  const char kErrorRegex[] = "";
#else
  const char kErrorRegex[] =
      "Only writable shared memory region can be converted to read-only";
#endif
  PlatformSharedMemoryRegion region =
      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
  ASSERT_TRUE(region.IsValid());
  EXPECT_TRUE(region.ConvertToReadOnly());
  // A second conversion must die: the region is no longer in kWritable mode.
  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
}
} // namespace subtle
} // namespace base
// This diff is collapsed.
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/read_only_shared_memory_region.h"
#include <utility>
#include "base/memory/shared_memory.h"
#include "build/build_config.h"
namespace base {
// static
// Creates the region writable first so the caller gets the one-and-only
// writable mapping, then converts the platform handle to read-only before
// wrapping it (the wrapping constructor CHECK-enforces read-only mode).
MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
  subtle::PlatformSharedMemoryRegion handle =
      subtle::PlatformSharedMemoryRegion::CreateWritable(size);
  if (!handle.IsValid())
    return {};

  void* memory_ptr = nullptr;
  size_t mapped_size = 0;
  if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
    return {};

  WritableSharedMemoryMapping mapping(memory_ptr, mapped_size,
                                      handle.GetGUID());
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // The Mac overload takes the already-mapped address — presumably to adjust
  // the existing mapping's VM protections; see the Mac platform
  // implementation to confirm.
  handle.ConvertToReadOnly(memory_ptr);
#else
  handle.ConvertToReadOnly();
#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
  ReadOnlySharedMemoryRegion region(std::move(handle));

  if (!region.IsValid())
    return {};

  return {std::move(region), std::move(mapping)};
}
// static
// Wraps a deserialized platform handle; the private constructor CHECK-fails
// unless |handle| is in read-only mode.
ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Deserialize(
    subtle::PlatformSharedMemoryRegion handle) {
  return ReadOnlySharedMemoryRegion(std::move(handle));
}

// static
// Moves the platform handle out of |region| (taken by value), leaving the
// caller's region invalid.
subtle::PlatformSharedMemoryRegion
ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
    ReadOnlySharedMemoryRegion region) {
  return std::move(region.handle_);
}

ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion() = default;
ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
    ReadOnlySharedMemoryRegion&& region) = default;
ReadOnlySharedMemoryRegion& ReadOnlySharedMemoryRegion::operator=(
    ReadOnlySharedMemoryRegion&& region) = default;
ReadOnlySharedMemoryRegion::~ReadOnlySharedMemoryRegion() = default;
// Duplicates the underlying read-only handle; returns an invalid region if
// duplication fails (e.g. this region is invalid).
ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Duplicate() {
  return ReadOnlySharedMemoryRegion(handle_.Duplicate());
}

// Maps the entire region read-only.
ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() {
  return MapAt(0, handle_.GetSize());
}
// Maps |size| bytes starting at |offset| with read-only access. Returns an
// invalid mapping for an invalid region or an out-of-range request.
ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(off_t offset,
                                                              size_t size) {
  if (!IsValid())
    return {};

  void* address = nullptr;
  size_t actual_size = 0;
  const bool mapped = handle_.MapAt(offset, size, &address, &actual_size);
  if (!mapped)
    return {};

  return ReadOnlySharedMemoryMapping(address, actual_size, handle_.GetGUID());
}
// NOTE(review): could likely be const; keep in sync with the header
// declaration if changed.
bool ReadOnlySharedMemoryRegion::IsValid() {
  return handle_.IsValid();
}

// Private; enforces the class invariant that the wrapped handle is always in
// read-only mode.
ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
    subtle::PlatformSharedMemoryRegion handle)
    : handle_(std::move(handle)) {
  CHECK_EQ(handle_.GetMode(),
           subtle::PlatformSharedMemoryRegion::Mode::kReadOnly);
}
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
#include <utility>
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
struct MappedReadOnlyRegion;
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// read-only. These mappings remain valid even after the region handle is moved
// or destroyed.
class BASE_EXPORT ReadOnlySharedMemoryRegion {
 public:
  using MappingType = ReadOnlySharedMemoryMapping;
  // Creates a new ReadOnlySharedMemoryRegion instance of a given size along
  // with the WritableSharedMemoryMapping which provides the only way to modify
  // the content of the newly created region.
  //
  // This means that the caller's process is the only process that can modify
  // the region content. If you need to pass write access to another process,
  // consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
  static MappedReadOnlyRegion Create(size_t size);

  // Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
  // that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
  // isn't read-only.
  // This should be used only by the code passing handles across process
  // boundaries.
  static ReadOnlySharedMemoryRegion Deserialize(
      subtle::PlatformSharedMemoryRegion handle);

  // Extracts a platform handle from the region. Ownership is transferred to the
  // returned region object.
  // This should be used only for sending the handle from the current process to
  // another.
  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
      ReadOnlySharedMemoryRegion region);

  // Default constructor initializes an invalid instance.
  ReadOnlySharedMemoryRegion();

  // Move operations are allowed.
  ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
  ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);

  // Destructor closes shared memory region if valid.
  // All created mappings will remain valid.
  ~ReadOnlySharedMemoryRegion();

  // Duplicates the underlying platform handle and creates a new
  // ReadOnlySharedMemoryRegion instance that owns this handle. Returns a valid
  // ReadOnlySharedMemoryRegion on success, invalid otherwise. The current
  // region instance remains valid in any case.
  ReadOnlySharedMemoryRegion Duplicate();

  // Maps the shared memory region into the caller's address space with
  // read-only access. The mapped address is guaranteed to have an alignment of
  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
  // Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
  // otherwise.
  ReadOnlySharedMemoryMapping Map();

  // Same as above, but maps only |size| bytes of the shared memory region
  // starting with the given |offset|. |offset| must be aligned to value of
  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
  // requested bytes are out of the region limits.
  ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size);

  // Whether the underlying platform handle is valid.
  // NOTE(review): could likely be const; keep in sync with the .cc definition
  // if changed.
  bool IsValid();

  // Returns the maximum mapping size that can be created from this region.
  // DCHECKs that the region is valid.
  size_t GetSize() {
    DCHECK(IsValid());
    return handle_.GetSize();
  }

 private:
  explicit ReadOnlySharedMemoryRegion(
      subtle::PlatformSharedMemoryRegion handle);

  subtle::PlatformSharedMemoryRegion handle_;

  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
};
// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
struct MappedReadOnlyRegion {
  // Read-only handle; may be duplicated and shared with other processes.
  ReadOnlySharedMemoryRegion region;
  // The creator's sole writable view of the region's content.
  WritableSharedMemoryMapping mapping;
};
} // namespace base
#endif // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_mapping.h"
#include <utility>
#include "base/logging.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#if defined(OS_POSIX)
#include <sys/mman.h>
#endif
#if defined(OS_WIN)
#include <aclapi.h>
#endif
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach_vm.h>
#include "base/mac/mach_logging.h"
#endif
#if defined(OS_FUCHSIA)
#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>
#endif
namespace base {
SharedMemoryMapping::SharedMemoryMapping() = default;

// Move constructor: steals the mapping, leaving |mapping| invalid so its
// destructor will not unmap. The stale size_/guid_ of the moved-from object
// are never read once memory_ is null (IsValid() checks memory_ only).
SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
    : memory_(mapping.memory_), size_(mapping.size_), guid_(mapping.guid_) {
  mapping.memory_ = nullptr;
}
// Move assignment: unmaps any currently-held mapping, then steals the state
// of |mapping|, leaving it invalid so its destructor will not unmap.
SharedMemoryMapping& SharedMemoryMapping::operator=(
    SharedMemoryMapping&& mapping) {
  // Guard against self-move: unconditionally calling Unmap() first would
  // leave |memory_| dangling and cause a double unmap on destruction.
  if (this != &mapping) {
    Unmap();
    memory_ = std::exchange(mapping.memory_, nullptr);
    size_ = mapping.size_;
    guid_ = mapping.guid_;
  }
  return *this;
}
// Destructor unmaps the pages if this instance still owns a mapping.
SharedMemoryMapping::~SharedMemoryMapping() {
  Unmap();
}

// Protected constructor used by the region classes; registers the mapping
// with the global tracker for memory-dump accounting.
SharedMemoryMapping::SharedMemoryMapping(void* memory,
                                         size_t size,
                                         const UnguessableToken& guid)
    : memory_(memory), size_(size), guid_(guid) {
  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
}
// Releases the mapped pages with the platform-appropriate API and
// unregisters the mapping from the global tracker. No-op for invalid
// (default-constructed or moved-from) instances.
void SharedMemoryMapping::Unmap() {
  if (!IsValid())
    return;

  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
#if defined(OS_WIN)
  if (!UnmapViewOfFile(memory_))
    DPLOG(ERROR) << "UnmapViewOfFile";
#elif defined(OS_FUCHSIA)
  uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
  zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), addr, size_);
  DLOG_IF(ERROR, status != ZX_OK)
      << "zx_vmar_unmap failed: " << zx_status_get_string(status);
#elif defined(OS_MACOSX) && !defined(OS_IOS)
  kern_return_t kr = mach_vm_deallocate(
      mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_), size_);
  MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
#else
  if (munmap(memory_, size_) < 0)
    DPLOG(ERROR) << "munmap";
#endif
}
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
    ReadOnlySharedMemoryMapping&&) = default;
ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
    ReadOnlySharedMemoryMapping&&) = default;

// Private: only ReadOnlySharedMemoryRegion creates non-default instances.
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
    void* address,
    size_t size,
    const UnguessableToken& guid)
    : SharedMemoryMapping(address, size, guid) {}

WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
    WritableSharedMemoryMapping&&) = default;
WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
    WritableSharedMemoryMapping&&) = default;

// Private: only the *SharedMemoryRegion friend classes create non-default
// instances.
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
    void* address,
    size_t size,
    const UnguessableToken& guid)
    : SharedMemoryMapping(address, size, guid) {}
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
#include <cstddef>
#include "base/macros.h"
#include "base/unguessable_token.h"
namespace base {
namespace subtle {
class PlatformSharedMemoryRegion;
class PlatformSharedMemoryRegionTest;
} // namespace subtle
// Base class for scoped handles to a shared memory mapping created from a
// shared memory region. Created shared memory mappings remain valid even if the
// creator region is transferred or destroyed.
//
// Each mapping has an UnguessableToken that identifies the shared memory region
// it was created from. This is used for memory metrics, to avoid overcounting
// shared memory.
class BASE_EXPORT SharedMemoryMapping {
 public:
  // Default constructor initializes an invalid instance.
  SharedMemoryMapping();

  // Move operations are allowed; the moved-from mapping becomes invalid and
  // will not unmap on destruction.
  SharedMemoryMapping(SharedMemoryMapping&& mapping);
  SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);

  // Unmaps the region if the mapping is valid.
  virtual ~SharedMemoryMapping();

  // Returns true iff the mapping is valid. False means there is no
  // corresponding area of memory.
  bool IsValid() const { return memory_ != nullptr; }

  // Returns the size of the mapping in bytes. This is page-aligned. This is
  // undefined for invalid instances.
  size_t size() const {
    DCHECK(IsValid());
    return size_;
  }

  // Returns 128-bit GUID of the region this mapping belongs to.
  const UnguessableToken& guid() const {
    DCHECK(IsValid());
    return guid_;
  }

 protected:
  SharedMemoryMapping(void* address, size_t size, const UnguessableToken& guid);

  // Base address of the mapping; subclasses expose it with the constness
  // appropriate to their access mode.
  void* raw_memory_ptr() const { return memory_; }

 private:
  friend class subtle::PlatformSharedMemoryRegionTest;
  friend class SharedMemoryTracker;

  // Platform-specific unmap; no-op when invalid.
  void Unmap();

  void* memory_ = nullptr;
  size_t size_ = 0;
  UnguessableToken guid_;

  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
};
// Class modeling a read-only mapping of a shared memory region into the
// current process' address space. This is created by ReadOnlySharedMemoryRegion
// instances.
class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
 public:
  // Default constructor initializes an invalid instance.
  ReadOnlySharedMemoryMapping();

  // Move operations are allowed.
  ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
  ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);

  // Returns the base address of the mapping. This is read-only memory. This is
  // page-aligned. This is nullptr for invalid instances.
  const void* memory() const { return raw_memory_ptr(); }

 private:
  friend class ReadOnlySharedMemoryRegion;
  // Only the friend region class creates valid instances.
  ReadOnlySharedMemoryMapping(void* address,
                              size_t size,
                              const UnguessableToken& guid);
  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
};
// Class modeling a writable mapping of a shared memory region into the
// current process' address space. This is created by *SharedMemoryRegion
// instances.
class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
 public:
  // Default constructor initializes an invalid instance.
  WritableSharedMemoryMapping();

  // Move operations are allowed.
  WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
  WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);

  // Returns the base address of the mapping. This is writable memory. This is
  // page-aligned. This is nullptr for invalid instances.
  void* memory() const { return raw_memory_ptr(); }

 private:
  friend class subtle::PlatformSharedMemoryRegion;
  friend class ReadOnlySharedMemoryRegion;
  friend class WritableSharedMemoryRegion;
  friend class UnsafeSharedMemoryRegion;
  // Only the friend region classes create valid instances.
  WritableSharedMemoryMapping(void* address,
                              size_t size,
                              const UnguessableToken& guid);
  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
};
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
// This diff is collapsed.
...@@ -39,20 +39,80 @@ const trace_event::MemoryAllocatorDump* ...@@ -39,20 +39,80 @@ const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDump( SharedMemoryTracker::GetOrCreateSharedMemoryDump(
const SharedMemory* shared_memory, const SharedMemory* shared_memory,
trace_event::ProcessMemoryDump* pmd) { trace_event::ProcessMemoryDump* pmd) {
const std::string dump_name = return GetOrCreateSharedMemoryDumpInternal(shared_memory->memory(),
GetDumpNameForTracing(shared_memory->mapped_id()); shared_memory->mapped_size(),
shared_memory->mapped_id(), pmd);
}
// Registers a legacy SharedMemory segment, keyed by its mapped address.
// The segment must not already be registered (DCHECK-enforced).
void SharedMemoryTracker::IncrementMemoryUsage(
    const SharedMemory& shared_memory) {
  AutoLock hold(usages_lock_);
  DCHECK(usages_.find(shared_memory.memory()) == usages_.end());
  usages_.emplace(shared_memory.memory(), UsageInfo(shared_memory.mapped_size(),
                                                    shared_memory.mapped_id()));
}

// Registers a new-API SharedMemoryMapping, keyed by its mapped address.
void SharedMemoryTracker::IncrementMemoryUsage(
    const SharedMemoryMapping& mapping) {
  AutoLock hold(usages_lock_);
  DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
  usages_.emplace(mapping.raw_memory_ptr(),
                  UsageInfo(mapping.size(), mapping.guid()));
}

// Unregisters a legacy SharedMemory segment; it must have been registered.
void SharedMemoryTracker::DecrementMemoryUsage(
    const SharedMemory& shared_memory) {
  AutoLock hold(usages_lock_);
  DCHECK(usages_.find(shared_memory.memory()) != usages_.end());
  usages_.erase(shared_memory.memory());
}

// Unregisters a SharedMemoryMapping; it must have been registered.
void SharedMemoryTracker::DecrementMemoryUsage(
    const SharedMemoryMapping& mapping) {
  AutoLock hold(usages_lock_);
  DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
  usages_.erase(mapping.raw_memory_ptr());
}
// Registers itself as a memory-dump provider so tracked mappings appear in
// process memory dumps.
SharedMemoryTracker::SharedMemoryTracker() {
  trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "SharedMemoryTracker", nullptr);
}

SharedMemoryTracker::~SharedMemoryTracker() = default;

// MemoryDumpProvider implementation: emits one allocator dump per mapping
// currently registered with the tracker.
bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
                                       trace_event::ProcessMemoryDump* pmd) {
  AutoLock hold(usages_lock_);
  for (const auto& usage : usages_) {
    const trace_event::MemoryAllocatorDump* dump =
        GetOrCreateSharedMemoryDumpInternal(
            usage.first, usage.second.mapped_size, usage.second.mapped_id, pmd);
    DCHECK(dump);
  }
  return true;
}
// static
const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
void* mapped_memory,
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd) {
const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump = trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name); pmd->GetAllocatorDump(dump_name);
if (local_dump) if (local_dump)
return local_dump; return local_dump;
size_t virtual_size = shared_memory->mapped_size(); size_t virtual_size = mapped_size;
// If resident size is not available, a virtual size is used as fallback. // If resident size is not available, a virtual size is used as fallback.
size_t size = virtual_size; size_t size = virtual_size;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED) #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
base::Optional<size_t> resident_size = base::Optional<size_t> resident_size =
trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory( trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
*shared_memory); mapped_memory, mapped_size);
if (resident_size.has_value()) if (resident_size.has_value())
size = resident_size.value(); size = resident_size.value();
#endif #endif
...@@ -63,7 +123,7 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump( ...@@ -63,7 +123,7 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump(
local_dump->AddScalar("virtual_size", local_dump->AddScalar("virtual_size",
trace_event::MemoryAllocatorDump::kUnitsBytes, trace_event::MemoryAllocatorDump::kUnitsBytes,
virtual_size); virtual_size);
auto global_dump_guid = GetGlobalDumpIdForTracing(shared_memory->mapped_id()); auto global_dump_guid = GetGlobalDumpIdForTracing(mapped_id);
trace_event::MemoryAllocatorDump* global_dump = trace_event::MemoryAllocatorDump* global_dump =
pmd->CreateSharedGlobalAllocatorDump(global_dump_guid); pmd->CreateSharedGlobalAllocatorDump(global_dump_guid);
global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize, global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
...@@ -75,38 +135,4 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump( ...@@ -75,38 +135,4 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDump(
return local_dump; return local_dump;
} }
// Legacy bookkeeping for the SharedMemory-based API: records the mapped size
// keyed by the SharedMemory instance itself.
void SharedMemoryTracker::IncrementMemoryUsage(
    const SharedMemory& shared_memory) {
  AutoLock hold(usages_lock_);
  // Each SharedMemory instance may be registered at most once.
  DCHECK(usages_.find(&shared_memory) == usages_.end());
  usages_[&shared_memory] = shared_memory.mapped_size();
}
// Legacy bookkeeping for the SharedMemory-based API: removes the entry for
// |shared_memory|.
void SharedMemoryTracker::DecrementMemoryUsage(
    const SharedMemory& shared_memory) {
  AutoLock hold(usages_lock_);
  // The instance must previously have been registered.
  DCHECK(usages_.find(&shared_memory) != usages_.end());
  usages_.erase(&shared_memory);
}
// Legacy dump path: creates one allocator dump per tracked SharedMemory
// instance. Always reports success.
bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
                                       trace_event::ProcessMemoryDump* pmd) {
  {
    // Scoped so the lock is released before returning.
    AutoLock hold(usages_lock_);
    for (const auto& usage : usages_) {
      const trace_event::MemoryAllocatorDump* dump =
          GetOrCreateSharedMemoryDump(usage.first, pmd);
      DCHECK(dump);
    }
  }
  return true;
}
// Registers this tracker as a memory dump provider with the global
// MemoryDumpManager.
SharedMemoryTracker::SharedMemoryTracker() {
  trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "SharedMemoryTracker", nullptr);
}

SharedMemoryTracker::~SharedMemoryTracker() = default;
} // namespace } // namespace
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <string> #include <string>
#include "base/memory/shared_memory.h" #include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h" #include "base/synchronization/lock.h"
#include "base/trace_event/memory_dump_provider.h" #include "base/trace_event/memory_dump_provider.h"
...@@ -38,11 +39,15 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider { ...@@ -38,11 +39,15 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
const SharedMemory* shared_memory, const SharedMemory* shared_memory,
trace_event::ProcessMemoryDump* pmd); trace_event::ProcessMemoryDump* pmd);
// Records shared memory usage on mapping. // Records shared memory usage on valid mapping.
void IncrementMemoryUsage(const SharedMemory& shared_memory); void IncrementMemoryUsage(const SharedMemory& shared_memory);
// We're in the middle of a refactor https://crbug.com/795291. Eventually, the
// first call will go away.
void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
// Records shared memory usage on unmapping. // Records shared memory usage on unmapping.
void DecrementMemoryUsage(const SharedMemory& shared_memory); void DecrementMemoryUsage(const SharedMemory& shared_memory);
void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
// Root dump name for all shared memory dumps. // Root dump name for all shared memory dumps.
static const char kDumpRootName[]; static const char kDumpRootName[];
...@@ -55,9 +60,24 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider { ...@@ -55,9 +60,24 @@ class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
bool OnMemoryDump(const trace_event::MemoryDumpArgs& args, bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) override; trace_event::ProcessMemoryDump* pmd) override;
static const trace_event::MemoryAllocatorDump*
GetOrCreateSharedMemoryDumpInternal(void* mapped_memory,
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd);
// Information associated with each mapped address.
struct UsageInfo {
UsageInfo(size_t size, const UnguessableToken& id)
: mapped_size(size), mapped_id(id) {}
size_t mapped_size;
UnguessableToken mapped_id;
};
// Used to lock when |usages_| is modified or read. // Used to lock when |usages_| is modified or read.
Lock usages_lock_; Lock usages_lock_;
std::map<const SharedMemory*, size_t> usages_; std::map<void*, UsageInfo> usages_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker); DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
}; };
......
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/unsafe_shared_memory_region.h"
#include <utility>
#include "base/memory/shared_memory.h"
namespace base {
// static
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
  // Allocate a platform region in unsafe mode and hand ownership straight to
  // the wrapper; the private constructor CHECKs the mode.
  return UnsafeSharedMemoryRegion(
      subtle::PlatformSharedMemoryRegion::CreateUnsafe(size));
}
// static
// Rewraps a platform handle received from another process; the private
// constructor CHECKs that |handle| is in unsafe mode.
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
    subtle::PlatformSharedMemoryRegion handle) {
  return UnsafeSharedMemoryRegion(std::move(handle));
}
// static
// Moves the platform handle out of |region|, leaving |region| invalid; used
// when sending the handle across a process boundary.
subtle::PlatformSharedMemoryRegion
UnsafeSharedMemoryRegion::TakeHandleForSerialization(
    UnsafeSharedMemoryRegion region) {
  return std::move(region.handle_);
}
// Defaulted special members: the wrapped PlatformSharedMemoryRegion already
// implements move-only ownership of the platform handle, so the defaults are
// correct (Rule of Zero).
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
    UnsafeSharedMemoryRegion&& region) = default;
UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
    UnsafeSharedMemoryRegion&& region) = default;
UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
// Returns a new region owning a duplicate of the underlying platform handle;
// this instance is left untouched.
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() {
  return UnsafeSharedMemoryRegion(handle_.Duplicate());
}
// Maps the entire region writable; equivalent to MapAt() with offset 0 and
// the region's full size.
WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() {
  return MapAt(0, handle_.GetSize());
}
// Maps |size| bytes starting at |offset| with write access. Returns an
// invalid (default-constructed) mapping if the region is invalid or the
// platform mapping call fails.
WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
                                                            size_t size) {
  // An invalid region cannot produce a mapping.
  if (!IsValid())
    return WritableSharedMemoryMapping();

  void* base_address = nullptr;
  size_t actual_mapped_size = 0;
  const bool mapped =
      handle_.MapAt(offset, size, &base_address, &actual_mapped_size);
  if (!mapped)
    return WritableSharedMemoryMapping();

  return WritableSharedMemoryMapping(base_address, actual_mapped_size,
                                     handle_.GetGUID());
}
// True iff the underlying platform handle is valid.
bool UnsafeSharedMemoryRegion::IsValid() {
  return handle_.IsValid();
}
// Takes ownership of |handle|. Only handles created in unsafe mode may back
// this region type, enforced by the CHECK below.
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
    subtle::PlatformSharedMemoryRegion handle)
    : handle_(std::move(handle)) {
  CHECK_EQ(handle_.GetMode(),
           subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
}
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// writable. These mappings remain valid even after the region handle is moved
// or destroyed.
//
// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
// with caution as the region will be writable to any process with a handle to
// the region.
//
// Use this if and only if the following is true:
// - You do not need to share the region as read-only, and,
// - You need to have several instances of the region simultaneously, possibly
// in different processes, that can produce writable mappings.
class BASE_EXPORT UnsafeSharedMemoryRegion {
 public:
  // The mapping type this region produces; allows generic code to be
  // parameterized over a region type.
  using MappingType = WritableSharedMemoryMapping;

  // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
  // used for mapping writable shared memory into the virtual address space.
  static UnsafeSharedMemoryRegion Create(size_t size);

  // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
  // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
  // isn't unsafe.
  // This should be used only by the code passing a handle across
  // process boundaries.
  static UnsafeSharedMemoryRegion Deserialize(
      subtle::PlatformSharedMemoryRegion handle);

  // Extracts a platform handle from the region. Ownership is transferred to the
  // returned region object.
  // This should be used only for sending the handle from the current
  // process to another.
  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
      UnsafeSharedMemoryRegion region);

  // Default constructor initializes an invalid instance (IsValid() returns
  // false).
  UnsafeSharedMemoryRegion();

  // Move operations are allowed; copying is disallowed (see below), making
  // ownership of the handle unique per instance.
  UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
  UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);

  // Destructor closes shared memory region if valid.
  // All created mappings will remain valid.
  ~UnsafeSharedMemoryRegion();

  // Duplicates the underlying platform handle and creates a new
  // UnsafeSharedMemoryRegion instance that owns the newly created handle.
  // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
  // The current region instance remains valid in any case.
  UnsafeSharedMemoryRegion Duplicate();

  // Maps the shared memory region into the caller's address space with write
  // access. The mapped address is guaranteed to have an alignment of
  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
  // otherwise.
  WritableSharedMemoryMapping Map();

  // Same as above, but maps only |size| bytes of the shared memory region
  // starting with the given |offset|. |offset| must be aligned to value of
  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
  // requested bytes are out of the region limits.
  WritableSharedMemoryMapping MapAt(off_t offset, size_t size);

  // Whether the underlying platform handle is valid.
  bool IsValid();

  // Returns the maximum mapping size that can be created from this region.
  // Must only be called on a valid region (DCHECKed).
  size_t GetSize() {
    DCHECK(IsValid());
    return handle_.GetSize();
  }

 private:
  // Takes ownership of |handle|; CHECK-fails unless its mode is kUnsafe.
  explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);

  // The owned platform-specific handle backing this region.
  subtle::PlatformSharedMemoryRegion handle_;

  DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
};
} // namespace base
#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/writable_shared_memory_region.h"
#include <utility>
#include "base/memory/shared_memory.h"
#include "build/build_config.h"
namespace base {
// static
WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
  // Allocate a platform region in writable mode and hand ownership straight
  // to the wrapper; the private constructor CHECKs the mode.
  return WritableSharedMemoryRegion(
      subtle::PlatformSharedMemoryRegion::CreateWritable(size));
}
// static
// Rewraps a platform handle received from another process; the private
// constructor CHECKs that |handle| is in writable mode.
WritableSharedMemoryRegion WritableSharedMemoryRegion::Deserialize(
    subtle::PlatformSharedMemoryRegion handle) {
  return WritableSharedMemoryRegion(std::move(handle));
}
// static
// Moves the platform handle out of |region|, leaving |region| invalid; used
// when transferring the (non-duplicable) writable region to another process.
subtle::PlatformSharedMemoryRegion
WritableSharedMemoryRegion::TakeHandleForSerialization(
    WritableSharedMemoryRegion region) {
  return std::move(region.handle_);
}
// static
// Consumes |region| and converts its handle to read-only mode. Returns an
// invalid region if the platform conversion fails; in either case |region|
// itself is left invalid, so no new writable mappings can be created.
ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
    WritableSharedMemoryRegion region) {
  // Take sole ownership of the platform handle up front.
  subtle::PlatformSharedMemoryRegion platform_handle =
      std::move(region.handle_);
  if (platform_handle.ConvertToReadOnly())
    return ReadOnlySharedMemoryRegion::Deserialize(std::move(platform_handle));
  return ReadOnlySharedMemoryRegion();
}
// Defaulted special members: the wrapped PlatformSharedMemoryRegion already
// implements move-only ownership of the platform handle, so the defaults are
// correct (Rule of Zero).
WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
WritableSharedMemoryRegion::WritableSharedMemoryRegion(
    WritableSharedMemoryRegion&& region) = default;
WritableSharedMemoryRegion& WritableSharedMemoryRegion::operator=(
    WritableSharedMemoryRegion&& region) = default;
WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
// Maps the entire region writable; equivalent to MapAt() with offset 0 and
// the region's full size.
WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() {
  return MapAt(0, handle_.GetSize());
}
// Maps |size| bytes starting at |offset| with write access. Returns an
// invalid (default-constructed) mapping if the region is invalid or the
// platform mapping call fails.
WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(off_t offset,
                                                              size_t size) {
  // An invalid region cannot produce a mapping.
  if (!IsValid())
    return WritableSharedMemoryMapping();

  void* base_address = nullptr;
  size_t actual_mapped_size = 0;
  const bool mapped =
      handle_.MapAt(offset, size, &base_address, &actual_mapped_size);
  if (!mapped)
    return WritableSharedMemoryMapping();

  return WritableSharedMemoryMapping(base_address, actual_mapped_size,
                                     handle_.GetGUID());
}
// True iff the underlying platform handle is valid.
bool WritableSharedMemoryRegion::IsValid() {
  return handle_.IsValid();
}
// Takes ownership of |handle|. Only handles created in writable mode may back
// this region type, enforced by the CHECK below.
WritableSharedMemoryRegion::WritableSharedMemoryRegion(
    subtle::PlatformSharedMemoryRegion handle)
    : handle_(std::move(handle)) {
  CHECK_EQ(handle_.GetMode(),
           subtle::PlatformSharedMemoryRegion::Mode::kWritable);
}
} // namespace base
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// writable. These mappings remain valid even after the region handle is moved
// or destroyed.
//
// This region can be locked to read-only access by converting it to a
// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
// and may only be transferred, not duplicated.
class BASE_EXPORT WritableSharedMemoryRegion {
 public:
  // The mapping type this region produces; allows generic code to be
  // parameterized over a region type.
  using MappingType = WritableSharedMemoryMapping;

  // Creates a new WritableSharedMemoryRegion instance of a given
  // size that can be used for mapping writable shared memory into the virtual
  // address space.
  static WritableSharedMemoryRegion Create(size_t size);

  // Returns a WritableSharedMemoryRegion built from a platform handle that was
  // taken from another WritableSharedMemoryRegion instance. Returns an invalid
  // region iff the |handle| is invalid. CHECK-fails if the |handle| isn't
  // writable.
  // This should be used only by the code passing handles across process
  // boundaries.
  static WritableSharedMemoryRegion Deserialize(
      subtle::PlatformSharedMemoryRegion handle);

  // Extracts a platform handle from the region. Ownership is transferred to the
  // returned region object.
  // This should be used only for sending the handle from the current
  // process to another.
  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
      WritableSharedMemoryRegion region);

  // Makes the region read-only. No new writable mappings of the region can be
  // created after this call. Returns an invalid region on failure.
  // Note: consumes |region| in both the success and the failure case.
  static ReadOnlySharedMemoryRegion ConvertToReadOnly(
      WritableSharedMemoryRegion region);

  // Default constructor initializes an invalid instance (IsValid() returns
  // false).
  WritableSharedMemoryRegion();

  // Move operations are allowed; copying is disallowed (see below), so
  // ownership of a writable region is unique and may only be transferred.
  WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
  WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);

  // Destructor closes shared memory region if valid.
  // All created mappings will remain valid.
  ~WritableSharedMemoryRegion();

  // Maps the shared memory region into the caller's address space with write
  // access. The mapped address is guaranteed to have an alignment of
  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
  // otherwise.
  WritableSharedMemoryMapping Map();

  // Same as above, but maps only |size| bytes of the shared memory block
  // starting with the given |offset|. |offset| must be aligned to value of
  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
  // requested bytes are out of the region limits.
  WritableSharedMemoryMapping MapAt(off_t offset, size_t size);

  // Whether underlying platform handles are valid.
  bool IsValid();

  // Returns the maximum mapping size that can be created from this region.
  // Must only be called on a valid region (DCHECKed).
  size_t GetSize() {
    DCHECK(IsValid());
    return handle_.GetSize();
  }

 private:
  // Takes ownership of |handle|; CHECK-fails unless its mode is kWritable.
  explicit WritableSharedMemoryRegion(
      subtle::PlatformSharedMemoryRegion handle);

  // The owned platform-specific handle backing this region.
  subtle::PlatformSharedMemoryRegion handle_;

  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
};
} // namespace base
#endif // BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
...@@ -71,12 +71,12 @@ static bool CheckReadOnlySharedMemoryFdPosix(int fd) { ...@@ -71,12 +71,12 @@ static bool CheckReadOnlySharedMemoryFdPosix(int fd) {
#if defined(OS_FUCHSIA) #if defined(OS_FUCHSIA)
// Fuchsia specific implementation. // Fuchsia specific implementation.
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { bool CheckReadOnlySharedMemoryFuchsiaHandle(zx_handle_t handle) {
const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE; const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
uintptr_t addr; uintptr_t addr;
const zx_handle_t root = zx_vmar_root_self(); const zx_handle_t root = zx_vmar_root_self();
const zx_status_t status = const zx_status_t status =
zx_vmar_map(root, 0, handle.GetHandle(), 0U, kDataSize, flags, &addr); zx_vmar_map(root, 0, handle, 0U, kDataSize, flags, &addr);
if (status == ZX_OK) { if (status == ZX_OK) {
LOG(ERROR) << "zx_vmar_map() should have failed!"; LOG(ERROR) << "zx_vmar_map() should have failed!";
zx_vmar_unmap(root, addr, kDataSize); zx_vmar_unmap(root, addr, kDataSize);
...@@ -89,16 +89,13 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { ...@@ -89,16 +89,13 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
} }
return true; return true;
} }
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// For OSX, the code has to deal with both POSIX and MACH handles.
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
if (handle.type_ == SharedMemoryHandle::POSIX)
return CheckReadOnlySharedMemoryFdPosix(handle.file_descriptor_.fd);
#elif defined(OS_MACOSX) && !defined(OS_IOS)
bool CheckReadOnlySharedMemoryMachPort(mach_port_t memory_object) {
mach_vm_address_t memory; mach_vm_address_t memory;
const kern_return_t kr = mach_vm_map( const kern_return_t kr = mach_vm_map(
mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, memory_object,
handle.memory_object_, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE); VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE);
if (kr == KERN_SUCCESS) { if (kr == KERN_SUCCESS) {
LOG(ERROR) << "mach_vm_map() should have failed!"; LOG(ERROR) << "mach_vm_map() should have failed!";
...@@ -107,10 +104,11 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { ...@@ -107,10 +104,11 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
} }
return true; return true;
} }
#elif defined(OS_WIN) #elif defined(OS_WIN)
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { bool CheckReadOnlySharedMemoryWindowsHandle(HANDLE handle) {
void* memory = MapViewOfFile(handle.GetHandle(), void* memory =
FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize); MapViewOfFile(handle, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize);
if (memory != nullptr) { if (memory != nullptr) {
LOG(ERROR) << "MapViewOfFile() should have failed!"; LOG(ERROR) << "MapViewOfFile() should have failed!";
UnmapViewOfFile(memory); UnmapViewOfFile(memory);
...@@ -118,11 +116,46 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { ...@@ -118,11 +116,46 @@ bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
} }
return true; return true;
} }
#else #endif
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) { bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
// For OSX, the code has to deal with both POSIX and MACH handles.
if (handle.type_ == SharedMemoryHandle::POSIX)
return CheckReadOnlySharedMemoryFdPosix(handle.file_descriptor_.fd);
else
return CheckReadOnlySharedMemoryMachPort(handle.memory_object_);
#elif defined(OS_FUCHSIA)
return CheckReadOnlySharedMemoryFuchsiaHandle(handle.GetHandle());
#elif defined(OS_WIN)
return CheckReadOnlySharedMemoryWindowsHandle(handle.GetHandle());
#else
return CheckReadOnlySharedMemoryFdPosix(handle.GetHandle()); return CheckReadOnlySharedMemoryFdPosix(handle.GetHandle());
#endif
} }
bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
subtle::PlatformSharedMemoryRegion region) {
if (region.GetMode() != subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) {
LOG(ERROR) << "Expected region mode is "
<< static_cast<int>(
subtle::PlatformSharedMemoryRegion::Mode::kReadOnly)
<< " but actual is " << static_cast<int>(region.GetMode());
return false;
}
#if defined(OS_MACOSX) && !defined(OS_IOS)
return CheckReadOnlySharedMemoryMachPort(region.GetPlatformHandle());
#elif defined(OS_FUCHSIA)
return CheckReadOnlySharedMemoryFuchsiaHandle(region.GetPlatformHandle());
#elif defined(OS_WIN)
return CheckReadOnlySharedMemoryWindowsHandle(region.GetPlatformHandle());
#elif defined(OS_ANDROID)
return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle());
#else
return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle().fd);
#endif #endif
}
#endif // !OS_NACL #endif // !OS_NACL
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#ifndef BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ #ifndef BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
#define BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ #define BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_handle.h" #include "base/memory/shared_memory_handle.h"
namespace base { namespace base {
...@@ -15,6 +16,9 @@ namespace base { ...@@ -15,6 +16,9 @@ namespace base {
// otherwise. // otherwise.
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle); bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle);
bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
subtle::PlatformSharedMemoryRegion region);
} // namespace base } // namespace base
#endif // BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_ #endif // BASE_TEST_TEST_SHARED_MEMORY_UTIL_H_
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <vector> #include <vector>
#include "base/memory/ptr_util.h" #include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_tracker.h" #include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h" #include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h" #include "base/strings/stringprintf.h"
...@@ -164,13 +163,14 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address, ...@@ -164,13 +163,14 @@ size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
// static // static
base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory( base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
const SharedMemory& shared_memory) { void* start_address,
size_t mapped_size) {
#if defined(OS_MACOSX) && !defined(OS_IOS) #if defined(OS_MACOSX) && !defined(OS_IOS)
// On macOS, use mach_vm_region instead of mincore for performance // On macOS, use mach_vm_region instead of mincore for performance
// (crbug.com/742042). // (crbug.com/742042).
mach_vm_size_t dummy_size = 0; mach_vm_size_t dummy_size = 0;
mach_vm_address_t address = mach_vm_address_t address =
reinterpret_cast<mach_vm_address_t>(shared_memory.memory()); reinterpret_cast<mach_vm_address_t>(start_address);
vm_region_top_info_data_t info; vm_region_top_info_data_t info;
MachVMRegionResult result = MachVMRegionResult result =
GetTopInfo(mach_task_self(), &dummy_size, &address, &info); GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
...@@ -212,10 +212,9 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory( ...@@ -212,10 +212,9 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
// Sanity check in case the mapped size is less than the total size of the // Sanity check in case the mapped size is less than the total size of the
// region. // region.
size_t pages_to_fault = size_t pages_to_fault =
std::min(resident_pages, std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);
(shared_memory.mapped_size() + PAGE_SIZE - 1) / PAGE_SIZE);
volatile char* base_address = static_cast<char*>(shared_memory.memory()); volatile char* base_address = static_cast<char*>(start_address);
for (size_t i = 0; i < pages_to_fault; ++i) { for (size_t i = 0; i < pages_to_fault; ++i) {
// Reading from a volatile is a visible side-effect for the purposes of // Reading from a volatile is a visible side-effect for the purposes of
// optimization. This guarantees that the optimizer will not kill this line. // optimization. This guarantees that the optimizer will not kill this line.
...@@ -224,8 +223,7 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory( ...@@ -224,8 +223,7 @@ base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
return resident_pages * PAGE_SIZE; return resident_pages * PAGE_SIZE;
#else #else
return CountResidentBytes(shared_memory.memory(), return CountResidentBytes(start_address, mapped_size);
shared_memory.mapped_size());
#endif // defined(OS_MACOSX) && !defined(OS_IOS) #endif // defined(OS_MACOSX) && !defined(OS_IOS)
} }
......
...@@ -74,10 +74,11 @@ class BASE_EXPORT ProcessMemoryDump { ...@@ -74,10 +74,11 @@ class BASE_EXPORT ProcessMemoryDump {
// process. The |start_address| must be page-aligned. // process. The |start_address| must be page-aligned.
static size_t CountResidentBytes(void* start_address, size_t mapped_size); static size_t CountResidentBytes(void* start_address, size_t mapped_size);
// Returns the total bytes resident for the given |shared_memory|'s mapped // The same as above, but the given mapped range should belong to the
// region. // shared_memory's mapped region.
static base::Optional<size_t> CountResidentBytesInSharedMemory( static base::Optional<size_t> CountResidentBytesInSharedMemory(
const SharedMemory& shared_memory); void* start_address,
size_t mapped_size);
#endif #endif
ProcessMemoryDump(scoped_refptr<HeapProfilerSerializationState> ProcessMemoryDump(scoped_refptr<HeapProfilerSerializationState>
......
...@@ -547,7 +547,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) { ...@@ -547,7 +547,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory1.CreateAndMapAnonymous(size1); shared_memory1.CreateAndMapAnonymous(size1);
memset(shared_memory1.memory(), 0, size1); memset(shared_memory1.memory(), 0, size1);
base::Optional<size_t> res1 = base::Optional<size_t> res1 =
ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory1); ProcessMemoryDump::CountResidentBytesInSharedMemory(
shared_memory1.memory(), shared_memory1.mapped_size());
ASSERT_TRUE(res1.has_value()); ASSERT_TRUE(res1.has_value());
ASSERT_EQ(res1.value(), size1); ASSERT_EQ(res1.value(), size1);
shared_memory1.Unmap(); shared_memory1.Unmap();
...@@ -559,7 +560,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) { ...@@ -559,7 +560,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory2.CreateAndMapAnonymous(kVeryLargeMemorySize); shared_memory2.CreateAndMapAnonymous(kVeryLargeMemorySize);
memset(shared_memory2.memory(), 0, kVeryLargeMemorySize); memset(shared_memory2.memory(), 0, kVeryLargeMemorySize);
base::Optional<size_t> res2 = base::Optional<size_t> res2 =
ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory2); ProcessMemoryDump::CountResidentBytesInSharedMemory(
shared_memory2.memory(), shared_memory2.mapped_size());
ASSERT_TRUE(res2.has_value()); ASSERT_TRUE(res2.has_value());
ASSERT_EQ(res2.value(), kVeryLargeMemorySize); ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
shared_memory2.Unmap(); shared_memory2.Unmap();
...@@ -571,7 +573,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) { ...@@ -571,7 +573,8 @@ TEST(ProcessMemoryDumpTest, CountResidentBytesInSharedMemory) {
shared_memory3.CreateAndMapAnonymous(kVeryLargeMemorySize); shared_memory3.CreateAndMapAnonymous(kVeryLargeMemorySize);
memset(shared_memory3.memory(), 0, kTouchedMemorySize); memset(shared_memory3.memory(), 0, kTouchedMemorySize);
base::Optional<size_t> res3 = base::Optional<size_t> res3 =
ProcessMemoryDump::CountResidentBytesInSharedMemory(shared_memory3); ProcessMemoryDump::CountResidentBytesInSharedMemory(
shared_memory3.memory(), shared_memory3.mapped_size());
ASSERT_TRUE(res3.has_value()); ASSERT_TRUE(res3.has_value());
ASSERT_EQ(res3.value(), kTouchedMemorySize); ASSERT_EQ(res3.value(), kTouchedMemorySize);
shared_memory3.Unmap(); shared_memory3.Unmap();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment