Commit 26b06001 authored by erikchen, committed by Commit Bot

OOP HP accounting fix for thread destruction frees.

Previously, OOP HP (out-of-process heap profiling) relied on base TLS to implement its
re-entrancy bit. Base TLS cannot be used after thread destruction has started, so frees
that occurred during thread destruction were not being logged.

This CL implements a very simple cross-platform TLS re-entrancy bit. Since it's
only a single bit, the data can be stored in the TLS value without needing any
additional allocations. This means that it can be safely used during thread
destruction.

With this CL, frees that occur during thread destruction will be correctly recorded.
Allocations still depend on base TLS, so allocations made during thread destruction will
still be missed.
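
For illustration, a minimal standalone sketch of the single-bit TLS re-entrancy guard
follows (POSIX-only for brevity; the names InitReentrancyKey and ScopedShimEntry are
hypothetical, while the CL's real guards are ScopedAllowFree and ScopedAllowAlloc in
allocator_shim.cc below):

// Hypothetical, simplified sketch (not part of the CL). The re-entrancy bit
// is stored directly in the TLS value (nullptr vs. 1), so no per-thread heap
// allocation is needed and no destructor has to run at thread exit.
#include <pthread.h>
#include <cstdio>

namespace {

pthread_key_t g_reentrancy_key;

void InitReentrancyKey() {
  // No destructor is registered: there is nothing to free, which is what
  // keeps the bit usable late in thread destruction.
  pthread_key_create(&g_reentrancy_key, nullptr);
}

class ScopedShimEntry {
 public:
  ScopedShimEntry()
      : allowed_(pthread_getspecific(g_reentrancy_key) == nullptr) {
    if (allowed_)
      pthread_setspecific(g_reentrancy_key, reinterpret_cast<void*>(1));
  }
  ~ScopedShimEntry() {
    if (allowed_)
      pthread_setspecific(g_reentrancy_key, nullptr);
  }
  explicit operator bool() const { return allowed_; }

 private:
  const bool allowed_;
};

}  // namespace

int main() {
  InitReentrancyKey();
  ScopedShimEntry outer;  // First entry on this thread: allowed.
  ScopedShimEntry inner;  // Nested entry: rejected, preventing re-entrancy.
  std::printf("outer=%d inner=%d\n", static_cast<bool>(outer),
              static_cast<bool>(inner));  // Prints "outer=1 inner=0".
}

Because the key has no destructor and the state is the TLS value itself, nothing has to
be allocated or freed per thread, which is why such a guard remains usable after base TLS
has been torn down.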

Bug: 839416
Change-Id: I2e1aab9de73122f234a4b11b826e491c347dcfd5
TBR: gab@chromium.org
Reviewed-on: https://chromium-review.googlesource.com/1076448
Reviewed-by: Richard Coles <torne@chromium.org>
Reviewed-by: Robert Liao <robliao@chromium.org>
Commit-Queue: Erik Chen <erikchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#563033}
parent d0a2c265
@@ -35,6 +35,7 @@
 #include "components/crash/content/app/breakpad_linux.h"
 #include "components/crash/core/common/crash_key.h"
 #include "components/safe_browsing/android/safe_browsing_api_handler_bridge.h"
+#include "components/services/heap_profiling/public/cpp/allocator_shim.h"
 #include "components/spellcheck/spellcheck_buildflags.h"
 #include "content/public/browser/android/browser_media_player_manager_register.h"
 #include "content/public/browser/browser_main_runner.h"
@@ -188,6 +189,14 @@ bool AwMainDelegate::BasicStartupComplete(int* exit_code) {
   base::trace_event::TraceLog::GetInstance()->SetArgumentFilterPredicate(
       base::BindRepeating(&IsTraceEventArgsWhitelisted));

+  // The TLS slot used by the memlog allocator shim needs to be initialized
+  // early to ensure that it gets assigned a low slot number. If it gets
+  // initialized too late, the glibc TLS system will require a malloc call in
+  // order to allocate storage for a higher slot number. Since malloc is
+  // hooked, this causes re-entrancy into the allocator shim while the TLS
+  // object is partially initialized, which the TLS object is supposed to
+  // protect against.
+  heap_profiling::InitTLSSlot();
+
   return false;
 }
...
@@ -19,7 +19,7 @@
 #endif

 namespace heap_profiling {
-class ScopedAllowLogging;
+class ScopedAllowAlloc;
 }  // namespace heap_profiling

 namespace base {
@@ -156,7 +156,7 @@ class BASE_EXPORT ThreadLocalStorage {
   friend class base::SamplingHeapProfiler;
   friend class base::internal::ThreadLocalStorageTestInternal;
   friend class base::trace_event::MallocDumpProvider;
-  friend class heap_profiling::ScopedAllowLogging;
+  friend class heap_profiling::ScopedAllowAlloc;
   static bool HasBeenDestroyed();

   DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
...
@@ -27,6 +27,11 @@
 #if defined(OS_POSIX)
 #include <limits.h>
+#include <pthread.h>
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
 #endif

 #if defined(OS_LINUX) || defined(OS_ANDROID)
@@ -44,7 +49,57 @@ using CaptureMode = base::trace_event::AllocationContextTracker::CaptureMode;
 namespace heap_profiling {

-// A ScopedAllowLogging instance must be instantiated in the scope of all hooks.
+namespace {
+
+// The base implementation of TLS will leak memory if accessed during late
+// stages of thread destruction. We roll our own implementation of TLS to
+// prevent reentrancy. Since this only requires storing a single bit of
+// information, we don't need to deal with hooking thread destruction to free
+// memory, and thus avoid leaks and other issues.
+#if defined(OS_WIN)
+using TLSKey = DWORD;
+#else
+using TLSKey = pthread_key_t;
+#endif
+
+// Holds a key to a TLS value. The TLS value (0 or 1) indicates whether the
+// allocator shim is already being used on the current thread.
+TLSKey g_prevent_reentrancy_key = 0;
+
+void InitializeReentrancyKey() {
+#if defined(OS_WIN)
+  g_prevent_reentrancy_key = TlsAlloc();
+  DCHECK_NE(TLS_OUT_OF_INDEXES, g_prevent_reentrancy_key);
+#else
+  // Returns |0| on success.
+  int result = pthread_key_create(&g_prevent_reentrancy_key, nullptr);
+  DCHECK(!result);
+#endif
+}
+
+bool CanEnterAllocatorShim() {
+#if defined(OS_WIN)
+  return !TlsGetValue(g_prevent_reentrancy_key);
+#else
+  return !pthread_getspecific(g_prevent_reentrancy_key);
+#endif
+}
+
+void SetEnteringAllocatorShim(bool entering) {
+  void* value = entering ? reinterpret_cast<void*>(1) : nullptr;
+#if defined(OS_WIN)
+  BOOL ret = TlsSetValue(g_prevent_reentrancy_key, value);
+  DPCHECK(ret);
+#else
+  int ret = pthread_setspecific(g_prevent_reentrancy_key, value);
+  DCHECK_EQ(ret, 0);
+#endif
+}
+
+}  // namespace
+
+// A ScopedAllow{Free,Alloc} instance must be instantiated in the scope of all
+// hooks.
 // AllocatorShimLogAlloc/AllocatorShimLogFree must only be called if it
 // evaluates to true.
 //
@@ -62,28 +117,42 @@ namespace heap_profiling {
 // The implementation of libmalloc will sometimes call malloc [from
 // one zone to another] - without this guard, the allocation would get two
 // chances of being sampled.
-class ScopedAllowLogging {
+class ScopedAllowFree {
  public:
-  ScopedAllowLogging()
-      : allowed_(LIKELY(!base::ThreadLocalStorage::HasBeenDestroyed()) &&
-                 LIKELY(!prevent_reentrancy_.Pointer()->Get())) {
+  ScopedAllowFree() : allowed_(LIKELY(CanEnterAllocatorShim())) {
     if (allowed_)
-      prevent_reentrancy_.Pointer()->Set(true);
+      SetEnteringAllocatorShim(true);
   }
-  ~ScopedAllowLogging() {
+  ~ScopedAllowFree() {
     if (allowed_)
-      prevent_reentrancy_.Pointer()->Set(false);
+      SetEnteringAllocatorShim(false);
   }

   explicit operator bool() const { return allowed_; }

  private:
   const bool allowed_;
-  static base::LazyInstance<base::ThreadLocalBoolean>::Leaky
-      prevent_reentrancy_;
 };

-base::LazyInstance<base::ThreadLocalBoolean>::Leaky
-    ScopedAllowLogging::prevent_reentrancy_;
+// Allocation logging also requires use of base TLS, so we must also check that
+// that is available. This means that allocations that occur after base TLS has
+// been torn down will not be logged.
+class ScopedAllowAlloc {
+ public:
+  ScopedAllowAlloc()
+      : allowed_(LIKELY(CanEnterAllocatorShim()) &&
+                 LIKELY(!base::ThreadLocalStorage::HasBeenDestroyed())) {
+    if (allowed_)
+      SetEnteringAllocatorShim(true);
+  }
+  ~ScopedAllowAlloc() {
+    if (allowed_)
+      SetEnteringAllocatorShim(false);
+  }
+
+  explicit operator bool() const { return allowed_; }
+
+ private:
+  const bool allowed_;
+};

 namespace {
@@ -162,14 +231,14 @@ void DestructShimState(void* shim_state) {
 // Technically, this code could be called after Thread destruction and we would
 // need to guard this with ThreadLocalStorage::HasBeenDestroyed(), but all calls
-// to this are guarded behind ScopedAllowLogging, which already makes the check.
+// to this are guarded behind ScopedAllowAlloc, which already makes the check.
 base::ThreadLocalStorage::Slot& ShimStateTLS() {
   static base::NoDestructor<base::ThreadLocalStorage::Slot> shim_state_tls(
       &DestructShimState);
   return *shim_state_tls;
 }

-// We don't need to worry about re-entrancy because ScopedAllowLogging
+// We don't need to worry about re-entrancy because ScopedAllowAlloc
 // already guards against that.
 ShimState* GetShimState() {
   ShimState* state = static_cast<ShimState*>(ShimStateTLS().Get());
@@ -321,7 +390,7 @@ void DoSend(const void* address,
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)

 void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   const AllocatorDispatch* const next = self->next;
   void* ptr = next->alloc_function(next, size, context);
@@ -337,7 +406,7 @@ void* HookZeroInitAlloc(const AllocatorDispatch* self,
                         size_t n,
                         size_t size,
                         void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   const AllocatorDispatch* const next = self->next;
   void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
@@ -352,7 +421,7 @@ void* HookAllocAligned(const AllocatorDispatch* self,
                        size_t alignment,
                        size_t size,
                        void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   const AllocatorDispatch* const next = self->next;
   void* ptr = next->alloc_aligned_function(next, alignment, size, context);
@@ -367,7 +436,7 @@ void* HookRealloc(const AllocatorDispatch* self,
                   void* address,
                   size_t size,
                   void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   const AllocatorDispatch* const next = self->next;
   void* ptr = next->realloc_function(next, address, size, context);
@@ -382,7 +451,7 @@ void* HookRealloc(const AllocatorDispatch* self,
 }

 void HookFree(const AllocatorDispatch* self, void* address, void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowFree allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->free_function(next, address, context);
@@ -404,7 +473,7 @@ unsigned HookBatchMalloc(const AllocatorDispatch* self,
                          void** results,
                          unsigned num_requested,
                          void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   const AllocatorDispatch* const next = self->next;
   unsigned count =
@@ -421,7 +490,7 @@ void HookBatchFree(const AllocatorDispatch* self,
                    void** to_be_freed,
                    unsigned num_to_be_freed,
                    void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowFree allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
@@ -436,7 +505,7 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
                           void* ptr,
                           size_t size,
                           void* context) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowFree allow_logging;
   const AllocatorDispatch* const next = self->next;
   next->free_definite_size_function(next, ptr, size, context);
@@ -461,28 +530,28 @@ AllocatorDispatch g_hooks = {
 #endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

 void HookPartitionAlloc(void* address, size_t size, const char* type) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kPartitionAlloc, address, size, type);
   }
 }

 void HookPartitionFree(void* address) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowFree allow_logging;
   if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
   }
 }

 void HookGCAlloc(uint8_t* address, size_t size, const char* type) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowAlloc allow_logging;
   if (LIKELY(allow_logging)) {
     AllocatorShimLogAlloc(AllocatorType::kOilpan, address, size, type);
   }
 }

 void HookGCFree(uint8_t* address) {
-  ScopedAllowLogging allow_logging;
+  ScopedAllowFree allow_logging;
   if (LIKELY(allow_logging)) {
     AllocatorShimLogFree(address);
   }
@@ -589,7 +658,7 @@ class FrameSerializer {
 }  // namespace

 void InitTLSSlot() {
-  { ScopedAllowLogging allow_logging; }
+  InitializeReentrancyKey();
   ignore_result(ShimStateTLS());
 }
...
@@ -38,6 +38,7 @@ void AllocatorShimLogAlloc(AllocatorType type,
                            size_t sz,
                            const char* context);

+// Logs a free. This must not rely on the base implementation of TLS.
 void AllocatorShimLogFree(void* address);

 // Ensures all send buffers are flushed. The given barrier ID is sent to the
...