Commit 88a96c6e authored by Alexander Khaustov, committed by Commit Bot

Remove ThreadLocalStorage::StaticSlot

Replace it with the pattern of a function-local
NoDestructor<ThreadLocalStorage::Slot>, as discussed on PS1 @
https://chromium-review.googlesource.com/c/chromium/src/+/881017/1

StaticSlot was subject to a possible initialization race: the
CHECK_NE(slot_, kInvalidSlotValue) in StaticSlot::Initialize could fail
if another thread concurrently initialized it to kInvalidSlotValue.
(A minimal sketch of the old and new patterns follows the commit
metadata below.)

Change-Id: Iefaa1dde6f85842d20561851a9a26ad5f3a20973
Reviewed-on: https://chromium-review.googlesource.com/881017
Reviewed-by: Erik Chen <erikchen@chromium.org>
Reviewed-by: Miriam Gershenson <mgersh@chromium.org>
Reviewed-by: Robert Sesek <rsesek@chromium.org>
Reviewed-by: Peter Kasting <pkasting@chromium.org>
Reviewed-by: Gabriel Charette <gab@chromium.org>
Commit-Queue: Gabriel Charette <gab@chromium.org>
Cr-Commit-Position: refs/heads/master@{#537009}
parent cba5d24a
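For context, here is a minimal sketch of the replacement pattern next to the StaticSlot pattern it removes. MyDestructorFunc, MyTLS, and the per-thread data it mentions are placeholder names used only for this illustration; the accessor mirrors the ImportantContentTLS example added to base/threading/thread_local_storage.h in the diff below.

#include "base/no_destructor.h"
#include "base/threading/thread_local_storage.h"

namespace {

// Hypothetical per-thread cleanup callback; a real caller would delete its
// per-thread object here.
void MyDestructorFunc(void* value) {}

// Old pattern (removed by this CL), shown for contrast:
//   ThreadLocalStorage::StaticSlot g_my_tls = TLS_INITIALIZER;
//   ...
//   if (!g_my_tls.initialized())
//     g_my_tls.Initialize(&MyDestructorFunc);  // lazy init; racy across threads

// New pattern: a function-local static NoDestructor<Slot>. C++11 guarantees
// the static is constructed exactly once even when several threads call
// MyTLS() concurrently, so there is no separate Initialize() step to race on.
base::ThreadLocalStorage::Slot& MyTLS() {
  static base::NoDestructor<base::ThreadLocalStorage::Slot> slot(
      &MyDestructorFunc);
  return *slot;
}

}  // namespace

// Usage from any thread:
//   MyTLS().Set(pointer_to_per_thread_data);
//   void* data = MyTLS().Get();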
@@ -13,6 +13,7 @@
 #include "base/allocator/allocator_shim.h"
 #include "base/allocator/features.h"
 #include "base/logging.h"
+#include "base/no_destructor.h"
 #include "base/threading/thread_local_storage.h"
 #include "build/build_config.h"
@@ -29,14 +30,34 @@ namespace {
 using base::allocator::AllocatorDispatch;
 
-ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;
-
 const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
 ThreadHeapUsage* const kInitializationSentinel =
     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask);
 ThreadHeapUsage* const kTeardownSentinel =
     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);
 
+ThreadLocalStorage::Slot& ThreadAllocationUsage() {
+  static NoDestructor<ThreadLocalStorage::Slot> thread_allocator_usage(
+      [](void* thread_heap_usage) {
+        // This destructor will be called twice. Once to destroy the actual
+        // ThreadHeapUsage instance and a second time, immediately after, for
+        // the sentinel. Re-setting the TLS slot (below) does re-initialize the
+        // TLS slot. The ThreadLocalStorage code is designed to deal with this
+        // use case and will re-call the destructor with the kTeardownSentinel
+        // as arg.
+        if (thread_heap_usage == kTeardownSentinel)
+          return;
+        DCHECK_NE(thread_heap_usage, kInitializationSentinel);
+
+        // Deleting the ThreadHeapUsage TLS object will re-enter the shim and
+        // hit RecordFree() (see below). The sentinel prevents RecordFree() from
+        // re-creating another ThreadHeapUsage object.
+        ThreadAllocationUsage().Set(kTeardownSentinel);
+        delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
+      });
+  return *thread_allocator_usage;
+}
+
 bool g_heap_tracking_enabled = false;
 
 // Forward declared as it needs to delegate memory allocation to the next
@@ -196,20 +217,20 @@ AllocatorDispatch allocator_dispatch = {&AllocFn,
                                         nullptr};
 
 ThreadHeapUsage* GetOrCreateThreadUsage() {
-  auto tls_ptr = reinterpret_cast<uintptr_t>(g_thread_allocator_usage.Get());
+  auto tls_ptr = reinterpret_cast<uintptr_t>(ThreadAllocationUsage().Get());
   if ((tls_ptr & kSentinelMask) == kSentinelMask)
     return nullptr;  // Re-entrancy case.
 
   auto* allocator_usage = reinterpret_cast<ThreadHeapUsage*>(tls_ptr);
   if (allocator_usage == nullptr) {
     // Prevent reentrancy due to the allocation below.
-    g_thread_allocator_usage.Set(kInitializationSentinel);
+    ThreadAllocationUsage().Set(kInitializationSentinel);
 
     allocator_usage = new ThreadHeapUsage();
     static_assert(std::is_pod<ThreadHeapUsage>::value,
                   "AllocatorDispatch must be POD");
     memset(allocator_usage, 0, sizeof(*allocator_usage));
-    g_thread_allocator_usage.Set(allocator_usage);
+    ThreadAllocationUsage().Set(allocator_usage);
   }
 
   return allocator_usage;
@@ -233,7 +254,6 @@ ThreadHeapUsageTracker::~ThreadHeapUsageTracker() {
 void ThreadHeapUsageTracker::Start() {
   DCHECK(thread_checker_.CalledOnValidThread());
-  DCHECK(g_thread_allocator_usage.initialized());
 
   thread_usage_ = GetOrCreateThreadUsage();
   usage_ = *thread_usage_;
@@ -276,16 +296,12 @@ void ThreadHeapUsageTracker::Stop(bool usage_is_exclusive) {
 }
 
 ThreadHeapUsage ThreadHeapUsageTracker::GetUsageSnapshot() {
-  DCHECK(g_thread_allocator_usage.initialized());
-
   ThreadHeapUsage* usage = GetOrCreateThreadUsage();
   DCHECK_NE(nullptr, usage);
   return *usage;
 }
 
 void ThreadHeapUsageTracker::EnableHeapTracking() {
-  EnsureTLSInitialized();
-
   CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
   g_heap_tracking_enabled = true;
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
@@ -314,27 +330,5 @@ ThreadHeapUsageTracker::GetDispatchForTesting() {
   return &allocator_dispatch;
 }
 
-void ThreadHeapUsageTracker::EnsureTLSInitialized() {
-  if (!g_thread_allocator_usage.initialized()) {
-    g_thread_allocator_usage.Initialize([](void* thread_heap_usage) {
-      // This destructor will be called twice. Once to destroy the actual
-      // ThreadHeapUsage instance and a second time, immediately after, for the
-      // sentinel. Re-setting the TLS slow (below) does re-initialize the TLS
-      // slot. The ThreadLocalStorage code is designed to deal with this use
-      // case (see comments in ThreadHeapUsageTracker::EnsureTLSInitialized) and
-      // will re-call the destructor with the kTeardownSentinel as arg.
-      if (thread_heap_usage == kTeardownSentinel)
-        return;
-      DCHECK(thread_heap_usage != kInitializationSentinel);
-
-      // Deleting the ThreadHeapUsage TLS object will re-enter the shim and hit
-      // RecordFree() above. The sentinel prevents RecordFree() from re-creating
-      // another ThreadHeapUsage object.
-      g_thread_allocator_usage.Set(kTeardownSentinel);
-      delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
-    });
-  }
-}
-
 }  // namespace debug
 }  // namespace base
@@ -94,9 +94,6 @@ class BASE_EXPORT ThreadHeapUsageTracker {
   // after calling this function in tests.
   static void DisableHeapTrackingForTesting();
 
-  // Exposed for testing only.
-  static void EnsureTLSInitialized();
-
   // Exposed to allow testing the shim without inserting it in the allocator
   // shim chain.
   static base::allocator::AllocatorDispatch* GetDispatchForTesting();
...
@@ -22,7 +22,6 @@ namespace {
 class TestingThreadHeapUsageTracker : public ThreadHeapUsageTracker {
  public:
   using ThreadHeapUsageTracker::DisableHeapTrackingForTesting;
-  using ThreadHeapUsageTracker::EnsureTLSInitialized;
   using ThreadHeapUsageTracker::GetDispatchForTesting;
 };
@@ -56,8 +55,6 @@ class ThreadHeapUsageTrackerTest : public testing::Test {
   }
 
   void SetUp() override {
-    TestingThreadHeapUsageTracker::EnsureTLSInitialized();
-
     dispatch_under_test_ =
         TestingThreadHeapUsageTracker::GetDispatchForTesting();
     ASSERT_EQ(nullptr, dispatch_under_test_->next);
...
@@ -35,7 +35,9 @@ namespace base {
 //   }
 //
 // NoDestructor<T> stores the object inline, so it also avoids a pointer
-// indirection and a malloc. Code should prefer to use NoDestructor<T> over:
+// indirection and a malloc. Also note that since C++11 static local variable
+// initialization is thread-safe and so is this pattern. Code should prefer to
+// use NoDestructor<T> over:
 // - The CR_DEFINE_STATIC_LOCAL() helper macro.
 // - A function scoped static T* or T& that is dynamically initialized.
 // - A global base::LazyInstance<T>.
...
@@ -71,7 +71,6 @@ base::subtle::Atomic32 g_native_tls_key =
 
 // The maximum number of slots in our thread local storage stack.
 constexpr int kThreadLocalStorageSize = 256;
-constexpr int kInvalidSlotValue = -1;
 
 enum TlsStatus {
   FREE,
@@ -251,7 +250,7 @@ void PlatformThreadLocalStorage::OnThreadExit(void* value) {
 
 }  // namespace internal
 
-void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
+void ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
   PlatformThreadLocalStorage::TLSKey key =
       base::subtle::NoBarrier_Load(&g_native_tls_key);
   if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
@@ -260,8 +259,6 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
   }
 
   // Grab a new slot.
-  slot_ = kInvalidSlotValue;
-  version_ = 0;
   {
     base::AutoLock auto_lock(*GetTLSMetadataLock());
     for (int i = 0; i < kThreadLocalStorageSize; ++i) {
@@ -276,6 +273,7 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
         g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
         g_tls_metadata[slot_candidate].destructor = destructor;
         g_last_assigned_slot = slot_candidate;
+        DCHECK_EQ(kInvalidSlotValue, slot_);
         slot_ = slot_candidate;
         version_ = g_tls_metadata[slot_candidate].version;
         break;
@@ -284,12 +282,9 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
   }
   CHECK_NE(slot_, kInvalidSlotValue);
   CHECK_LT(slot_, kThreadLocalStorageSize);
-
-  // Setup our destructor.
-  base::subtle::Release_Store(&initialized_, 1);
 }
 
-void ThreadLocalStorage::StaticSlot::Free() {
+void ThreadLocalStorage::Slot::Free() {
   DCHECK_NE(slot_, kInvalidSlotValue);
   DCHECK_LT(slot_, kThreadLocalStorageSize);
   {
@@ -299,10 +294,9 @@ void ThreadLocalStorage::StaticSlot::Free() {
     ++(g_tls_metadata[slot_].version);
   }
   slot_ = kInvalidSlotValue;
-  base::subtle::Release_Store(&initialized_, 0);
 }
 
-void* ThreadLocalStorage::StaticSlot::Get() const {
+void* ThreadLocalStorage::Slot::Get() const {
   TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
       PlatformThreadLocalStorage::GetTLSValue(
           base::subtle::NoBarrier_Load(&g_native_tls_key)));
@@ -316,7 +310,7 @@ void* ThreadLocalStorage::StaticSlot::Get() const {
   return tls_data[slot_].data;
 }
 
-void ThreadLocalStorage::StaticSlot::Set(void* value) {
+void ThreadLocalStorage::Slot::Set(void* value) {
   TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
       PlatformThreadLocalStorage::GetTLSValue(
           base::subtle::NoBarrier_Load(&g_native_tls_key)));
@@ -329,19 +323,11 @@ void ThreadLocalStorage::StaticSlot::Set(void* value) {
 }
 
 ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
-  tls_slot_.Initialize(destructor);
+  Initialize(destructor);
 }
 
 ThreadLocalStorage::Slot::~Slot() {
-  tls_slot_.Free();
-}
-
-void* ThreadLocalStorage::Slot::Get() const {
-  return tls_slot_.Get();
-}
-
-void ThreadLocalStorage::Slot::Set(void* value) {
-  tls_slot_.Set(value);
+  Free();
 }
 
 }  // namespace base
@@ -96,26 +96,23 @@ class BASE_EXPORT ThreadLocalStorage {
   // stored in thread local storage.
   typedef void (*TLSDestructorFunc)(void* value);
 
-  // StaticSlot uses its own struct initializer-list style static
-  // initialization, which does not require a constructor.
-#define TLS_INITIALIZER {0}
-
-  // A key representing one value stored in TLS.
-  // Initialize like
-  //   ThreadLocalStorage::StaticSlot my_slot = TLS_INITIALIZER;
-  // If you're not using a static variable, use the convenience class
-  // ThreadLocalStorage::Slot (below) instead.
-  struct BASE_EXPORT StaticSlot {
-    // Set up the TLS slot. Called by the constructor.
-    // 'destructor' is a pointer to a function to perform per-thread cleanup of
-    // this object. If set to NULL, no cleanup is done for this TLS slot.
-    void Initialize(TLSDestructorFunc destructor);
-
-    // Free a previously allocated TLS 'slot'.
-    // If a destructor was set for this slot, removes
-    // the destructor so that remaining threads exiting
-    // will not free data.
-    void Free();
+  // A key representing one value stored in TLS. Use as a class member or a
+  // local variable. If you need a static storage duration variable, use the
+  // following pattern with a NoDestructor<Slot>:
+  // void MyDestructorFunc(void* value);
+  // ThreadLocalStorage::Slot& ImportantContentTLS() {
+  //   static NoDestructor<ThreadLocalStorage::Slot> important_content_tls(
+  //       &MyDestructorFunc);
+  //   return *important_content_tls;
+  // }
+  class BASE_EXPORT Slot final {
+   public:
+    // |destructor| is a pointer to a function to perform per-thread cleanup of
+    // this object. If set to nullptr, no cleanup is done for this TLS slot.
+    explicit Slot(TLSDestructorFunc destructor = nullptr);
+    // If a destructor was set for this slot, removes the destructor so that
+    // remaining threads exiting will not free data.
+    ~Slot();
 
     // Get the thread-local value stored in slot 'slot'.
     // Values are guaranteed to initially be zero.
@@ -125,32 +122,13 @@ class BASE_EXPORT ThreadLocalStorage {
     // value 'value'.
     void Set(void* value);
 
-    bool initialized() const {
-      return base::subtle::Acquire_Load(&initialized_) != 0;
-    }
-
-    // The internals of this struct should be considered private.
-    base::subtle::Atomic32 initialized_;
-    int slot_;
-    uint32_t version_;
-  };
-
-  // A convenience wrapper around StaticSlot with a constructor. Can be used
-  // as a member variable.
-  class BASE_EXPORT Slot {
-   public:
-    explicit Slot(TLSDestructorFunc destructor = NULL);
-    ~Slot();
-
-    // Get the thread-local value stored in this slot.
-    // Values are guaranteed to initially be zero.
-    void* Get() const;
-
-    // Set the slot's thread-local value to |value|.
-    void Set(void* value);
-
    private:
-    StaticSlot tls_slot_;
+    void Initialize(TLSDestructorFunc destructor);
+    void Free();
+
+    static constexpr int kInvalidSlotValue = -1;
+    int slot_ = kInvalidSlotValue;
+    uint32_t version_ = 0;
 
     DISALLOW_COPY_AND_ASSIGN(Slot);
   };
...
@@ -2,14 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_local_storage.h"
+
 #if defined(OS_WIN)
 #include <windows.h>
 #include <process.h>
 #endif
 
 #include "base/macros.h"
+#include "base/no_destructor.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_local_storage.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -28,7 +30,13 @@ const int kFinalTlsValue = 0x7777;
 // How many times must a destructor be called before we really are done.
 const int kNumberDestructorCallRepetitions = 3;
 
-static ThreadLocalStorage::StaticSlot tls_slot = TLS_INITIALIZER;
+void ThreadLocalStorageCleanup(void* value);
+
+ThreadLocalStorage::Slot& TLSSlot() {
+  static NoDestructor<ThreadLocalStorage::Slot> slot(
+      &ThreadLocalStorageCleanup);
+  return *slot;
+}
 
 class ThreadLocalStorageRunner : public DelegateSimpleThread::Delegate {
  public:
@@ -39,14 +47,14 @@ class ThreadLocalStorageRunner : public DelegateSimpleThread::Delegate {
   void Run() override {
     *tls_value_ptr_ = kInitialTlsValue;
-    tls_slot.Set(tls_value_ptr_);
+    TLSSlot().Set(tls_value_ptr_);
 
-    int *ptr = static_cast<int*>(tls_slot.Get());
+    int* ptr = static_cast<int*>(TLSSlot().Get());
     EXPECT_EQ(ptr, tls_value_ptr_);
     EXPECT_EQ(*ptr, kInitialTlsValue);
 
     *tls_value_ptr_ = 0;
-    ptr = static_cast<int*>(tls_slot.Get());
+    ptr = static_cast<int*>(TLSSlot().Get());
     EXPECT_EQ(ptr, tls_value_ptr_);
     EXPECT_EQ(*ptr, 0);
@@ -69,7 +77,7 @@ void ThreadLocalStorageCleanup(void *value) {
   ASSERT_GE(kFinalTlsValue + kNumberDestructorCallRepetitions, *ptr);
   --*ptr;  // Move closer to our target.
   // Tell tls that we're not done with this thread, and still need destruction.
-  tls_slot.Set(value);
+  TLSSlot().Set(value);
 }
 
 }  // namespace
@@ -104,8 +112,6 @@ TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
   ThreadLocalStorageRunner* thread_delegates[kNumThreads];
   DelegateSimpleThread* threads[kNumThreads];
 
-  tls_slot.Initialize(ThreadLocalStorageCleanup);
-
   // Spawn the threads.
   for (int index = 0; index < kNumThreads; index++) {
     values[index] = kInitialTlsValue;
@@ -124,7 +130,6 @@ TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
     // Verify that the destructor was called and that we reset.
     EXPECT_EQ(values[index], kFinalTlsValue);
   }
-  tls_slot.Free();  // Stop doing callbacks to cleanup threads.
 }
 
 TEST(ThreadLocalStorageTest, TLSReclaim) {
...
@@ -11,6 +11,7 @@
 #include "base/debug/debugging_flags.h"
 #include "base/debug/leak_annotations.h"
 #include "base/debug/stack_trace.h"
+#include "base/no_destructor.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
@@ -32,14 +33,18 @@ const size_t kMaxTaskDepth = 16u;
 AllocationContextTracker* const kInitializingSentinel =
     reinterpret_cast<AllocationContextTracker*>(-1);
 
-ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
-
 // This function is added to the TLS slot to clean up the instance when the
 // thread exits.
 void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
   delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
 }
 
+ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
+  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
+      &DestructAllocationContextTracker);
+  return *tls_alloc_ctx_tracker;
+}
+
 // Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
 // deadlock when lock is already held by ThreadIdNameManager before the current
 // allocation. Gets the thread name from kernel if available or returns a string
@@ -68,15 +73,15 @@ const char* GetAndLeakThreadName() {
 // static
 AllocationContextTracker*
 AllocationContextTracker::GetInstanceForCurrentThread() {
-  AllocationContextTracker* tracker =
-      static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
+  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
+      AllocationContextTrackerTLS().Get());
   if (tracker == kInitializingSentinel)
     return nullptr;  // Re-entrancy case.
 
   if (!tracker) {
-    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
+    AllocationContextTrackerTLS().Set(kInitializingSentinel);
     tracker = new AllocationContextTracker();
-    g_tls_alloc_ctx_tracker.Set(tracker);
+    AllocationContextTrackerTLS().Set(tracker);
   }
 
   return tracker;
@@ -98,11 +103,6 @@ void AllocationContextTracker::SetCurrentThreadName(const char* name) {
 // static
 void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
-  // When enabling capturing, also initialize the TLS slot. This does not create
-  // a TLS instance yet.
-  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
-    g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
-
   // Release ordering ensures that when a thread observes |capture_mode_| to
   // be true through an acquire load, the TLS slot has been initialized.
   subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
...
@@ -12,6 +12,7 @@
 #include "base/debug/debugging_flags.h"
 #include "base/debug/stack_trace.h"
 #include "base/lazy_instance.h"
+#include "base/no_destructor.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/rand_util.h"
 #include "base/synchronization/lock.h"
@@ -123,16 +124,20 @@ void DestructShimState(void* shim_state) {
   delete static_cast<ShimState*>(shim_state);
 }
 
-base::ThreadLocalStorage::StaticSlot g_tls_shim_state = TLS_INITIALIZER;
+base::ThreadLocalStorage::Slot& ShimStateTLS() {
+  static base::NoDestructor<base::ThreadLocalStorage::Slot> shim_state_tls(
+      &DestructShimState);
+  return *shim_state_tls;
+}
 
 // We don't need to worry about re-entrancy because g_prevent_reentrancy
 // already guards against that.
 ShimState* GetShimState() {
-  ShimState* state = static_cast<ShimState*>(g_tls_shim_state.Get());
+  ShimState* state = static_cast<ShimState*>(ShimStateTLS().Get());
 
   if (!state) {
     state = new ShimState();
-    g_tls_shim_state.Set(state);
+    ShimStateTLS().Set(state);
   }
 
   return state;
@@ -549,8 +554,7 @@ class FrameSerializer {
 void InitTLSSlot() {
   ignore_result(g_prevent_reentrancy.Pointer()->Get());
-  if (!g_tls_shim_state.initialized())
-    g_tls_shim_state.Initialize(DestructShimState);
+  ignore_result(ShimStateTLS());
 }
 
 // In order for pseudo stacks to work, trace event filtering must be enabled.
...
@@ -4,6 +4,7 @@
 #include "components/url_formatter/idn_spoof_checker.h"
 
+#include "base/no_destructor.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_split.h"
@@ -20,12 +21,17 @@
 namespace url_formatter {
 
 namespace {
-base::ThreadLocalStorage::StaticSlot tls_index = TLS_INITIALIZER;
 
 void OnThreadTermination(void* regex_matcher) {
   delete reinterpret_cast<icu::RegexMatcher*>(regex_matcher);
 }
 
+base::ThreadLocalStorage::Slot& DangerousPatternTLS() {
+  static base::NoDestructor<base::ThreadLocalStorage::Slot>
+      dangerous_pattern_tls(&OnThreadTermination);
+  return *dangerous_pattern_tls;
+}
+
 #include "components/url_formatter/top_domains/alexa_skeletons-inc.cc"
 // All the domains in the above file have 3 or fewer labels.
 const size_t kNumberOfLabelsToCheck = 3;
@@ -249,10 +255,8 @@ bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label,
       !lgc_letters_n_ascii_.containsAll(label_string))
     return false;
 
-  if (!tls_index.initialized())
-    tls_index.Initialize(&OnThreadTermination);
   icu::RegexMatcher* dangerous_pattern =
-      reinterpret_cast<icu::RegexMatcher*>(tls_index.Get());
+      reinterpret_cast<icu::RegexMatcher*>(DangerousPatternTLS().Get());
   if (!dangerous_pattern) {
     // Disallow the katakana no, so, zo, or n, as they may be mistaken for
     // slashes when they're surrounded by non-Japanese scripts (i.e. scripts
@@ -295,7 +299,7 @@ bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label,
             R"([ijl]\u0307)",
             -1, US_INV),
         0, status);
-    tls_index.Set(dangerous_pattern);
+    DangerousPatternTLS().Set(dangerous_pattern);
   }
   dangerous_pattern->reset(label_string);
   return !dangerous_pattern->find();
...
@@ -81,29 +81,22 @@ class DnsReloader : public NetworkChangeNotifier::DNSObserver {
   }
 
  private:
-  DnsReloader() : resolver_generation_(0) {
-    tls_index_.Initialize(SlotReturnFunction);
-    NetworkChangeNotifier::AddDNSObserver(this);
-  }
+  DnsReloader() { NetworkChangeNotifier::AddDNSObserver(this); }
 
   ~DnsReloader() override {
     NOTREACHED();  // LeakyLazyInstance is not destructed.
   }
 
   base::Lock lock_;  // Protects resolver_generation_.
-  int resolver_generation_;
+  int resolver_generation_ = 0;
   friend struct base::LazyInstanceTraitsBase<DnsReloader>;
 
   // We use thread local storage to identify which ReloadState to interact with.
-  static base::ThreadLocalStorage::StaticSlot tls_index_;
+  base::ThreadLocalStorage::Slot tls_index_{&SlotReturnFunction};
 
   DISALLOW_COPY_AND_ASSIGN(DnsReloader);
 };
 
-// A TLS slot to the ReloadState for the current thread.
-// static
-base::ThreadLocalStorage::StaticSlot DnsReloader::tls_index_ = TLS_INITIALIZER;
-
 base::LazyInstance<DnsReloader>::Leaky
     g_dns_reloader = LAZY_INSTANCE_INITIALIZER;
...
@@ -36,3 +36,4 @@ $ git am --3way --message-id -p4 /tmp/patchdir
 
 Local Modifications:
  - codereview.settings has been excluded.
+ - thread_log_messages.cc (using ThreadLocalStorage::Slot instead of StaticSlot)
@@ -46,10 +46,6 @@ class ThreadLogMessagesMaster {
  private:
   ThreadLogMessagesMaster() {
-    DCHECK(!tls_.initialized());
-    tls_.Initialize(nullptr);
-    DCHECK(tls_.initialized());
-
     DCHECK(!logging::GetLogMessageHandler());
     logging::SetLogMessageHandler(LogMessageHandler);
   }
@@ -62,7 +58,7 @@ class ThreadLogMessagesMaster {
                                 size_t message_start,
                                 const std::string& string) {
     std::vector<std::string>* log_messages =
-        reinterpret_cast<std::vector<std::string>*>(tls_.Get());
+        reinterpret_cast<std::vector<std::string>*>(GetInstance()->tls_.Get());
     if (log_messages) {
       log_messages->push_back(string);
     }
@@ -72,15 +68,11 @@ class ThreadLogMessagesMaster {
     return false;
   }
 
-  static base::ThreadLocalStorage::StaticSlot tls_;
+  base::ThreadLocalStorage::Slot tls_;
 
   DISALLOW_COPY_AND_ASSIGN(ThreadLogMessagesMaster);
 };
 
-// static
-base::ThreadLocalStorage::StaticSlot ThreadLogMessagesMaster::tls_
-    = TLS_INITIALIZER;
-
 }  // namespace
 
 ThreadLogMessages::ThreadLogMessages() : log_messages_() {
...