Commit 77169a69 authored by jar@chromium.org's avatar jar@chromium.org

Use leaky lazy instance for master lock in profiler

The master lock is very rarely used, so we can afford
to use a lazy instance of this lock.

r=rtenneti
BUG=104167
Review URL: http://codereview.chromium.org/8567007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@109947 0039d316-1c4b-4281-b951-d872f2087c98
parent 0d289c0c
...@@ -165,7 +165,9 @@ ThreadData* ThreadData::all_thread_data_list_head_ = NULL; ...@@ -165,7 +165,9 @@ ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
ThreadData::ThreadDataPool* ThreadData::unregistered_thread_data_pool_ = NULL; ThreadData::ThreadDataPool* ThreadData::unregistered_thread_data_pool_ = NULL;
// static // static
base::Lock* ThreadData::list_lock_; base::LazyInstance<base::Lock,
base::LeakyLazyInstanceTraits<base::Lock> >
ThreadData::list_lock_(base::LINKER_INITIALIZED);
// static // static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED; ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
...@@ -185,7 +187,7 @@ ThreadData::ThreadData() ...@@ -185,7 +187,7 @@ ThreadData::ThreadData()
is_a_worker_thread_(true) { is_a_worker_thread_(true) {
int thread_number; int thread_number;
{ {
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
thread_number = ++thread_number_counter_; thread_number = ++thread_number_counter_;
} }
base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number); base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
...@@ -196,7 +198,7 @@ ThreadData::~ThreadData() {} ...@@ -196,7 +198,7 @@ ThreadData::~ThreadData() {}
void ThreadData::PushToHeadOfList() { void ThreadData::PushToHeadOfList() {
DCHECK(!next_); DCHECK(!next_);
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
incarnation_count_for_pool_ = incarnation_counter_; incarnation_count_for_pool_ = incarnation_counter_;
next_ = all_thread_data_list_head_; next_ = all_thread_data_list_head_;
all_thread_data_list_head_ = this; all_thread_data_list_head_ = this;
...@@ -225,8 +227,9 @@ ThreadData* ThreadData::Get() { ...@@ -225,8 +227,9 @@ ThreadData* ThreadData::Get() {
// We must be a worker thread, since we didn't pre-register. // We must be a worker thread, since we didn't pre-register.
ThreadData* worker_thread_data = NULL; ThreadData* worker_thread_data = NULL;
{ {
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
if (!unregistered_thread_data_pool_->empty()) { if (unregistered_thread_data_pool_ &&
!unregistered_thread_data_pool_->empty()) {
worker_thread_data = worker_thread_data =
const_cast<ThreadData*>(unregistered_thread_data_pool_->top()); const_cast<ThreadData*>(unregistered_thread_data_pool_->top());
unregistered_thread_data_pool_->pop(); unregistered_thread_data_pool_->pop();
...@@ -253,7 +256,7 @@ void ThreadData::OnThreadTermination(void* thread_data) { ...@@ -253,7 +256,7 @@ void ThreadData::OnThreadTermination(void* thread_data) {
void ThreadData::OnThreadTerminationCleanup() const { void ThreadData::OnThreadTerminationCleanup() const {
if (!is_a_worker_thread_) if (!is_a_worker_thread_)
return; return;
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
if (incarnation_counter_ != incarnation_count_for_pool_) if (incarnation_counter_ != incarnation_count_for_pool_)
return; // ThreadData was constructed in an earlier unit test. return; // ThreadData was constructed in an earlier unit test.
...@@ -261,7 +264,8 @@ void ThreadData::OnThreadTerminationCleanup() const { ...@@ -261,7 +264,8 @@ void ThreadData::OnThreadTerminationCleanup() const {
// In that case, the pool might be NULL. We really should detect this via the // In that case, the pool might be NULL. We really should detect this via the
// incarnation_counter_, but this call is rarely made, so we can afford to // incarnation_counter_, but this call is rarely made, so we can afford to
// code defensively. // code defensively.
if (unregistered_thread_data_pool_) if (!unregistered_thread_data_pool_)
unregistered_thread_data_pool_ = new ThreadDataPool;
unregistered_thread_data_pool_->push(this); unregistered_thread_data_pool_->push(this);
} }
...@@ -535,7 +539,7 @@ void ThreadData::TallyRunInAScopedRegionIfTracking( ...@@ -535,7 +539,7 @@ void ThreadData::TallyRunInAScopedRegionIfTracking(
// static // static
ThreadData* ThreadData::first() { ThreadData* ThreadData::first() {
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
return all_thread_data_list_head_; return all_thread_data_list_head_;
} }
...@@ -588,17 +592,9 @@ bool ThreadData::Initialize() { ...@@ -588,17 +592,9 @@ bool ThreadData::Initialize() {
if (!tls_index_.initialized()) // Testing may have initialized this. if (!tls_index_.initialized()) // Testing may have initialized this.
tls_index_.Initialize(&ThreadData::OnThreadTermination); tls_index_.Initialize(&ThreadData::OnThreadTermination);
DCHECK(tls_index_.initialized()); DCHECK(tls_index_.initialized());
ThreadDataPool* pool = new ThreadDataPool;
// TODO(jar): A linker initialized spin lock would be much safer than this
// allocation, which relies on being called while single threaded.
if (!list_lock_) // In case testing deleted this.
list_lock_ = new base::Lock;
status_ = kInitialStartupState; status_ = kInitialStartupState;
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
DCHECK_EQ(unregistered_thread_data_pool_,
reinterpret_cast<ThreadDataPool*>(NULL));
unregistered_thread_data_pool_ = pool;
++incarnation_counter_; ++incarnation_counter_;
return true; return true;
} }
...@@ -644,7 +640,7 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) { ...@@ -644,7 +640,7 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
ThreadData* thread_data_list; ThreadData* thread_data_list;
ThreadDataPool* final_pool; ThreadDataPool* final_pool;
{ {
base::AutoLock lock(*list_lock_); base::AutoLock lock(*list_lock_.Pointer());
thread_data_list = all_thread_data_list_head_; thread_data_list = all_thread_data_list_head_;
all_thread_data_list_head_ = NULL; all_thread_data_list_head_ = NULL;
final_pool = unregistered_thread_data_pool_; final_pool = unregistered_thread_data_pool_;
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <vector> #include <vector>
#include "base/base_export.h" #include "base/base_export.h"
#include "base/lazy_instance.h"
#include "base/location.h" #include "base/location.h"
#include "base/profiler/tracked_time.h" #include "base/profiler/tracked_time.h"
#include "base/time.h" #include "base/time.h"
...@@ -432,7 +433,7 @@ class BASE_EXPORT Aggregation: public DeathData { ...@@ -432,7 +433,7 @@ class BASE_EXPORT Aggregation: public DeathData {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Comparator is a class that supports the comparison of Snapshot instances. // Comparator is a class that supports the comparison of Snapshot instances.
// An instance is actually a list of chained Comparators, that can provide for // An instance is actually a list of chained Comparators, that can provide for
// arbitrary ordering. The path portion of an about:tracking URL is translated // arbitrary ordering. The path portion of an about:profiler URL is translated
// into such a chain, which is then used to order Snapshot instances in a // into such a chain, which is then used to order Snapshot instances in a
// vector. It orders them into groups (for aggregation), and can also order // vector. It orders them into groups (for aggregation), and can also order
// instances within the groups (for detailed rendering of the instances in an // instances within the groups (for detailed rendering of the instances in an
...@@ -575,7 +576,7 @@ class BASE_EXPORT ThreadData { ...@@ -575,7 +576,7 @@ class BASE_EXPORT ThreadData {
// This may return NULL if the system is disabled for any reason. // This may return NULL if the system is disabled for any reason.
static ThreadData* Get(); static ThreadData* Get();
// For a given (unescaped) about:tracking query, develop resulting HTML, and // For a given (unescaped) about:profiler query, develop resulting HTML, and
// append to output. // append to output.
static void WriteHTML(const std::string& query, std::string* output); static void WriteHTML(const std::string& query, std::string* output);
...@@ -642,7 +643,7 @@ class BASE_EXPORT ThreadData { ...@@ -642,7 +643,7 @@ class BASE_EXPORT ThreadData {
ThreadData* next() const { return next_; } ThreadData* next() const { return next_; }
// Using our lock, make a copy of the specified maps. These calls may arrive // Using our lock, make a copy of the specified maps. These calls may arrive
// from non-local threads, and are used to quickly scan data from all threads // from non-local threads, and are used to quickly scan data from all threads
// in order to build an HTML page for about:tracking. // in order to build an HTML page for about:profiler.
void SnapshotBirthMap(BirthMap *output) const; void SnapshotBirthMap(BirthMap *output) const;
void SnapshotDeathMap(DeathMap *output) const; void SnapshotDeathMap(DeathMap *output) const;
// -------- end of should be private methods. // -------- end of should be private methods.
...@@ -747,7 +748,10 @@ class BASE_EXPORT ThreadData { ...@@ -747,7 +748,10 @@ class BASE_EXPORT ThreadData {
static int incarnation_counter_; static int incarnation_counter_;
// Protection for access to all_thread_data_list_head_, and to // Protection for access to all_thread_data_list_head_, and to
// unregistered_thread_data_pool_. This lock is leaked at shutdown. // unregistered_thread_data_pool_. This lock is leaked at shutdown.
static base::Lock* list_lock_; // The lock is very infrequently used, so we can afford to just make a lazy
// instance and be safe.
static base::LazyInstance<base::Lock,
base::LeakyLazyInstanceTraits<base::Lock> > list_lock_;
// Record of what the incarnation_counter_ was when this instance was created. // Record of what the incarnation_counter_ was when this instance was created.
// If the incarnation_counter_ has changed, then we avoid pushing into the // If the incarnation_counter_ has changed, then we avoid pushing into the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment