Commit 0ec2bec1 authored by Jordan Werthman, committed by Commit Bot

Prevent conflict in base/thread_annotations.h

Much like dynamic_annotations, Chromium's //base contains a copy of
base/thread_annotations.h, which causes macro clashes and ODR
violations when compiling a target with both //base and Abseil.

In order to prevent these issues, the ./rename_dynamic_annotations.sh
script has been generalized to also rename base/thread_annotations.h
macros and symbols.

Verified that WebRTC uses its own custom thread_annotations.h with
"namespaced" macros and that GoogleTest does not include
thread_annotations.h, so this change should be a no-op for all
targets currently depending on Abseil.
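
To illustrate the rewrite the generalized script performs, here is a
sketch (the sample line is made up; the sed expression mirrors the
ones in rename_annotations.sh and assumes GNU sed):

  echo 'static int hits GUARDED_BY(mu) = 0;' \
    | sed 's/\bGUARDED_BY\b/ABSL_GUARDED_BY/g'
  # prints: static int hits ABSL_GUARDED_BY(mu) = 0;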

Bug: None
Change-Id: I1b0115128aa22f4a3982082ede437ae8f0324bc1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1592684
Reviewed-by: Mirko Bonadei <mbonadei@chromium.org>
Commit-Queue: Jordan Werthman <jordanjtw@google.com>
Cr-Commit-Position: refs/heads/master@{#656439}
parent b1c560a4
......@@ -18,9 +18,10 @@ How to update Abseil:
2. Copy the content of the Abseil git repo to //third_party/abseil-cpp.
3. From //third_party/abseil-cpp/ launch ./rename_dynamic_annotations.sh.
This script will rewrite dynamic_annotations macros and functions inside
Abseil in order to avoid ODR violations and macro clashes with Chromium
3. From //third_party/abseil-cpp/ launch ./rename_annotations.sh.
This script will rewrite dynamic_annotations and thread_annotations
macros and functions inside Abseil in order to avoid ODR violations
and macro clashes with Chromium
(see: https://github.com/abseil/abseil-cpp/issues/122).
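For concreteness, step 3 amounts to something like the following
(a sketch; it assumes a Chromium checkout, and the git command is
only a sanity check, not part of the script):

  cd third_party/abseil-cpp
  ./rename_annotations.sh      # rewrites annotations across absl/ in place
  git diff --stat | tail -n 1  # shows how many files the rename touched
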
Local Modifications:
......@@ -30,5 +31,6 @@ Local Modifications:
* All the BUILD.bazel files have been translated to BUILD.gn files.
* Functions and macros in absl/base/dynamic_annotations.{h,cc} have been renamed
to avoid ODR violations (see step 3).
* Functions and macros in absl/base/dynamic_annotations.{h,cc} and
absl/base/thread_annotations.h have been renamed to avoid ODR
violations and macro clashes with Chromium (see step 3).
......@@ -30,11 +30,11 @@ absl::once_flag once;
ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
int running_thread_count GUARDED_BY(counters_mu) = 0;
int call_once_invoke_count GUARDED_BY(counters_mu) = 0;
int call_once_finished_count GUARDED_BY(counters_mu) = 0;
int call_once_return_count GUARDED_BY(counters_mu) = 0;
bool done_blocking GUARDED_BY(counters_mu) = false;
int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
// Function to be called from absl::call_once. Waits for a notification.
void WaitAndIncrement() {
......@@ -60,7 +60,7 @@ void ThreadBody() {
}
// Returns true if all threads are set up for the test.
bool ThreadsAreSetup(void*) EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
// All ten threads must be running, and WaitAndIncrement should be blocked.
return running_thread_count == 10 && call_once_invoke_count == 1;
}
......
......@@ -203,9 +203,9 @@ struct LowLevelAlloc::Arena {
base_internal::SpinLock mu;
// Head of free list, sorted by address
AllocList freelist GUARDED_BY(mu);
AllocList freelist ABSL_GUARDED_BY(mu);
// Count of allocated blocks
int32_t allocation_count GUARDED_BY(mu);
int32_t allocation_count ABSL_GUARDED_BY(mu);
// flags passed to NewArena
const uint32_t flags;
// Result of sysconf(_SC_PAGESIZE)
......@@ -215,7 +215,7 @@ struct LowLevelAlloc::Arena {
// Smallest allocation block size
const size_t min_size;
// PRNG state
uint32_t random GUARDED_BY(mu);
uint32_t random ABSL_GUARDED_BY(mu);
};
namespace {
......@@ -275,10 +275,10 @@ static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
class ABSL_SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: arena_(arena) {
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
......@@ -290,7 +290,7 @@ class SCOPED_LOCKABLE ArenaLock {
arena_->mu.Lock();
}
~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
void Leave() UNLOCK_FUNCTION() {
void Leave() ABSL_UNLOCK_FUNCTION() {
arena_->mu.Unlock();
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
if (mask_valid_) {
......
......@@ -47,7 +47,7 @@
namespace absl {
namespace base_internal {
class LOCKABLE SpinLock {
class ABSL_LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockCooperative) {
ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
......@@ -76,7 +76,7 @@ class LOCKABLE SpinLock {
~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
// Acquire this SpinLock.
inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
if (!TryLockImpl()) {
SlowLock();
......@@ -88,7 +88,7 @@ class LOCKABLE SpinLock {
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
......@@ -98,7 +98,7 @@ class LOCKABLE SpinLock {
}
// Release this SpinLock, which must be held by the calling thread.
inline void Unlock() UNLOCK_FUNCTION() {
inline void Unlock() ABSL_UNLOCK_FUNCTION() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
......@@ -176,13 +176,13 @@ class LOCKABLE SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
SpinLockHolder(const SpinLockHolder&) = delete;
SpinLockHolder& operator=(const SpinLockHolder&) = delete;
......
......@@ -333,7 +333,7 @@ static absl::base_internal::SpinLock tid_lock(
// We set a bit per thread in this array to indicate that an ID is in
// use. ID 0 is unused because it is the default value returned by
// pthread_getspecific().
static std::vector<uint32_t>* tid_array GUARDED_BY(tid_lock) = nullptr;
static std::vector<uint32_t>* tid_array ABSL_GUARDED_BY(tid_lock) = nullptr;
static constexpr int kBitsPerWord = 32; // tid_array is uint32_t.
// Returns the TID to tid_array.
......
......@@ -65,7 +65,7 @@ struct HashtablezInfo {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
void PrepareForSampling() EXCLUSIVE_LOCKS_REQUIRED(init_mu);
void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
......@@ -83,7 +83,7 @@ struct HashtablezInfo {
// prevents races with sampling and resurrecting an object.
absl::Mutex init_mu;
HashtablezInfo* next;
HashtablezInfo* dead GUARDED_BY(init_mu);
HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
......
......@@ -69,8 +69,8 @@ class Barrier {
private:
Mutex lock_;
int num_to_block_ GUARDED_BY(lock_);
int num_to_exit_ GUARDED_BY(lock_);
int num_to_block_ ABSL_GUARDED_BY(lock_);
int num_to_exit_ ABSL_GUARDED_BY(lock_);
};
} // namespace absl
......
......@@ -88,8 +88,8 @@ class BlockingCounter {
private:
Mutex lock_;
int count_ GUARDED_BY(lock_);
int num_waiting_ GUARDED_BY(lock_);
int count_ ABSL_GUARDED_BY(lock_);
int num_waiting_ ABSL_GUARDED_BY(lock_);
};
} // namespace absl
......
......@@ -60,7 +60,7 @@ class ThreadPool {
}
private:
bool WorkAvailable() const EXCLUSIVE_LOCKS_REQUIRED(mu_) {
bool WorkAvailable() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return !queue_.empty();
}
......@@ -81,7 +81,7 @@ class ThreadPool {
}
absl::Mutex mu_;
std::queue<std::function<void()>> queue_ GUARDED_BY(mu_);
std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
std::vector<std::thread> threads_;
};
......
......@@ -143,11 +143,11 @@ ABSL_CONST_INIT absl::Mutex early_const_init_mutex(absl::kConstInit);
// constructors of globals "happen at link time"; memory is pre-initialized,
// before the constructors of either grab_lock or check_still_locked are run.)
extern absl::Mutex const_init_sanity_mutex;
OnConstruction grab_lock([]() NO_THREAD_SAFETY_ANALYSIS {
OnConstruction grab_lock([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
const_init_sanity_mutex.Lock();
});
ABSL_CONST_INIT absl::Mutex const_init_sanity_mutex(absl::kConstInit);
OnConstruction check_still_locked([]() NO_THREAD_SAFETY_ANALYSIS {
OnConstruction check_still_locked([]() ABSL_NO_THREAD_SAFETY_ANALYSIS {
const_init_sanity_mutex.AssertHeld();
const_init_sanity_mutex.Unlock();
});
......
......@@ -207,8 +207,8 @@ static absl::base_internal::SpinLock deadlock_graph_mu(
absl::base_internal::kLinkerInitialized);
// graph used to detect deadlocks.
static GraphCycles *deadlock_graph GUARDED_BY(deadlock_graph_mu)
PT_GUARDED_BY(deadlock_graph_mu);
static GraphCycles *deadlock_graph ABSL_GUARDED_BY(deadlock_graph_mu)
ABSL_PT_GUARDED_BY(deadlock_graph_mu);
//------------------------------------------------------------------
// An event mechanism for debugging mutex use.
......@@ -279,10 +279,10 @@ static const uint32_t kNSynchEvent = 1031;
static struct SynchEvent { // this is a trivial hash table for the events
// struct is freed when refcount reaches 0
int refcount GUARDED_BY(synch_event_mu);
int refcount ABSL_GUARDED_BY(synch_event_mu);
// buckets have linear, 0-terminated chains
SynchEvent *next GUARDED_BY(synch_event_mu);
SynchEvent *next ABSL_GUARDED_BY(synch_event_mu);
// Constant after initialization
uintptr_t masked_addr; // object at this address is called "name"
......@@ -296,7 +296,7 @@ static struct SynchEvent { // this is a trivial hash table for the events
// Constant after initialization
char name[1]; // actually longer---null-terminated std::string
} *synch_event[kNSynchEvent] GUARDED_BY(synch_event_mu);
} *synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu);
// Ensure that the object at "addr" has a SynchEvent struct associated with it,
// set "bits" in the word there (waiting until lockbit is clear before doing
......@@ -1139,7 +1139,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) {
}
static GraphId GetGraphIdLocked(Mutex *mu)
EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) {
if (!deadlock_graph) { // (re)create the deadlock graph.
deadlock_graph =
new (base_internal::LowLevelAlloc::Alloc(sizeof(*deadlock_graph)))
......@@ -1148,7 +1148,7 @@ static GraphId GetGraphIdLocked(Mutex *mu)
return deadlock_graph->GetId(mu);
}
static GraphId GetGraphId(Mutex *mu) LOCKS_EXCLUDED(deadlock_graph_mu) {
static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) {
deadlock_graph_mu.Lock();
GraphId id = GetGraphIdLocked(mu);
deadlock_graph_mu.Unlock();
......
......@@ -135,7 +135,7 @@ struct SynchWaitParams;
//
// See also `MutexLock`, below, for scoped `Mutex` acquisition.
class LOCKABLE Mutex {
class ABSL_LOCKABLE Mutex {
public:
// Creates a `Mutex` that is not held by anyone. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
......@@ -164,27 +164,27 @@ class LOCKABLE Mutex {
//
// Blocks the calling thread, if necessary, until this `Mutex` is free, and
// then acquires it exclusively. (This lock is also known as a "write lock.")
void Lock() EXCLUSIVE_LOCK_FUNCTION();
void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
// Mutex::Unlock()
//
// Releases this `Mutex` and returns it from the exclusive/write state to the
// free state. Caller must hold the `Mutex` exclusively.
void Unlock() UNLOCK_FUNCTION();
void Unlock() ABSL_UNLOCK_FUNCTION();
// Mutex::TryLock()
//
// If the mutex can be acquired without blocking, does so exclusively and
// returns `true`. Otherwise, returns `false`. Returns `true` with high
// probability if the `Mutex` was free.
bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true);
// Mutex::AssertHeld()
//
// Return immediately if this thread holds the `Mutex` exclusively (in write
// mode). Otherwise, may report an error (typically by crashing with a
// diagnostic), or may return immediately.
void AssertHeld() const ASSERT_EXCLUSIVE_LOCK();
void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
// ---------------------------------------------------------------------------
// Reader-Writer Locking
......@@ -225,28 +225,28 @@ class LOCKABLE Mutex {
// `ReaderLock()` will block if some other thread has an exclusive/writer lock
// on the mutex.
void ReaderLock() SHARED_LOCK_FUNCTION();
void ReaderLock() ABSL_SHARED_LOCK_FUNCTION();
// Mutex::ReaderUnlock()
//
// Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to
// the free state if this thread holds the last reader lock on the mutex. Note
// that you cannot call `ReaderUnlock()` on a mutex held in write mode.
void ReaderUnlock() UNLOCK_FUNCTION();
void ReaderUnlock() ABSL_UNLOCK_FUNCTION();
// Mutex::ReaderTryLock()
//
// If the mutex can be acquired without blocking, acquires this mutex for
// shared access and returns `true`. Otherwise, returns `false`. Returns
// `true` with high probability if the `Mutex` was free or shared.
bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true);
bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true);
// Mutex::AssertReaderHeld()
//
// Returns immediately if this thread holds the `Mutex` in at least shared
// mode (read mode). Otherwise, may report an error (typically by
// crashing with a diagnostic), or may return immediately.
void AssertReaderHeld() const ASSERT_SHARED_LOCK();
void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
// Mutex::WriterLock()
// Mutex::WriterUnlock()
......@@ -257,11 +257,11 @@ class LOCKABLE Mutex {
// These methods may be used (along with the complementary `Reader*()`
// methods) to distinguish simple exclusive `Mutex` usage (`Lock()`,
// etc.) from reader/writer lock usage.
void WriterLock() EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); }
void WriterUnlock() UNLOCK_FUNCTION() { this->Unlock(); }
void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); }
bool WriterTryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
return this->TryLock();
}
......@@ -315,11 +315,11 @@ class LOCKABLE Mutex {
// be acquired, then atomically acquires this `Mutex`. `LockWhen()` is
// logically equivalent to `*Lock(); Await();` though they may have different
// performance characteristics.
void LockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION();
void LockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION();
void ReaderLockWhen(const Condition &cond) SHARED_LOCK_FUNCTION();
void ReaderLockWhen(const Condition &cond) ABSL_SHARED_LOCK_FUNCTION();
void WriterLockWhen(const Condition &cond) EXCLUSIVE_LOCK_FUNCTION() {
void WriterLockWhen(const Condition &cond) ABSL_EXCLUSIVE_LOCK_FUNCTION() {
this->LockWhen(cond);
}
......@@ -361,11 +361,11 @@ class LOCKABLE Mutex {
//
// Negative timeouts are equivalent to a zero timeout.
bool LockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
EXCLUSIVE_LOCK_FUNCTION();
ABSL_EXCLUSIVE_LOCK_FUNCTION();
bool ReaderLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
SHARED_LOCK_FUNCTION();
ABSL_SHARED_LOCK_FUNCTION();
bool WriterLockWhenWithTimeout(const Condition &cond, absl::Duration timeout)
EXCLUSIVE_LOCK_FUNCTION() {
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithTimeout(cond, timeout);
}
......@@ -381,11 +381,11 @@ class LOCKABLE Mutex {
//
// Deadlines in the past are equivalent to an immediate deadline.
bool LockWhenWithDeadline(const Condition &cond, absl::Time deadline)
EXCLUSIVE_LOCK_FUNCTION();
ABSL_EXCLUSIVE_LOCK_FUNCTION();
bool ReaderLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
SHARED_LOCK_FUNCTION();
ABSL_SHARED_LOCK_FUNCTION();
bool WriterLockWhenWithDeadline(const Condition &cond, absl::Time deadline)
EXCLUSIVE_LOCK_FUNCTION() {
ABSL_EXCLUSIVE_LOCK_FUNCTION() {
return this->LockWhenWithDeadline(cond, deadline);
}
......@@ -535,9 +535,9 @@ class LOCKABLE Mutex {
// private:
// Mutex lock_;
// };
class SCOPED_LOCKABLE MutexLock {
class ABSL_SCOPED_LOCKABLE MutexLock {
public:
explicit MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
explicit MutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
......@@ -546,7 +546,7 @@ class SCOPED_LOCKABLE MutexLock {
MutexLock& operator=(const MutexLock&) = delete;
MutexLock& operator=(MutexLock&&) = delete;
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); }
private:
Mutex *const mu_;
......@@ -556,9 +556,9 @@ class SCOPED_LOCKABLE MutexLock {
//
// The `ReaderMutexLock` is a helper class, like `MutexLock`, which acquires and
// releases a shared lock on a `Mutex` via RAII.
class SCOPED_LOCKABLE ReaderMutexLock {
class ABSL_SCOPED_LOCKABLE ReaderMutexLock {
public:
explicit ReaderMutexLock(Mutex *mu) SHARED_LOCK_FUNCTION(mu)
explicit ReaderMutexLock(Mutex *mu) ABSL_SHARED_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->ReaderLock();
}
......@@ -568,7 +568,7 @@ class SCOPED_LOCKABLE ReaderMutexLock {
ReaderMutexLock& operator=(const ReaderMutexLock&) = delete;
ReaderMutexLock& operator=(ReaderMutexLock&&) = delete;
~ReaderMutexLock() UNLOCK_FUNCTION() {
~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() {
this->mu_->ReaderUnlock();
}
......@@ -580,9 +580,9 @@ class SCOPED_LOCKABLE ReaderMutexLock {
//
// The `WriterMutexLock` is a helper class, like `MutexLock`, which acquires and
// releases a write (exclusive) lock on a `Mutex` via RAII.
class SCOPED_LOCKABLE WriterMutexLock {
class ABSL_SCOPED_LOCKABLE WriterMutexLock {
public:
explicit WriterMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
explicit WriterMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
mu->WriterLock();
}
......@@ -592,7 +592,7 @@ class SCOPED_LOCKABLE WriterMutexLock {
WriterMutexLock& operator=(const WriterMutexLock&) = delete;
WriterMutexLock& operator=(WriterMutexLock&&) = delete;
~WriterMutexLock() UNLOCK_FUNCTION() {
~WriterMutexLock() ABSL_UNLOCK_FUNCTION() {
this->mu_->WriterUnlock();
}
......@@ -633,7 +633,7 @@ class SCOPED_LOCKABLE WriterMutexLock {
// Example:
//
// // assume count_ is not internal reference count
// int count_ GUARDED_BY(mu_);
// int count_ ABSL_GUARDED_BY(mu_);
//
// mu_.LockWhen(Condition(+[](int* count) { return *count == 0; },
// &count_));
......@@ -860,11 +860,11 @@ class CondVar {
// MutexLockMaybe
//
// MutexLockMaybe is like MutexLock, but is a no-op when mu is null.
class SCOPED_LOCKABLE MutexLockMaybe {
class ABSL_SCOPED_LOCKABLE MutexLockMaybe {
public:
explicit MutexLockMaybe(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
explicit MutexLockMaybe(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) { if (this->mu_ != nullptr) { this->mu_->Lock(); } }
~MutexLockMaybe() UNLOCK_FUNCTION() {
~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
private:
......@@ -879,17 +879,17 @@ class SCOPED_LOCKABLE MutexLockMaybe {
//
// ReleasableMutexLock is like MutexLock, but permits `Release()` of its
// mutex before destruction. `Release()` may be called at most once.
class SCOPED_LOCKABLE ReleasableMutexLock {
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock {
public:
explicit ReleasableMutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
explicit ReleasableMutexLock(Mutex *mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu)
: mu_(mu) {
this->mu_->Lock();
}
~ReleasableMutexLock() UNLOCK_FUNCTION() {
~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() {
if (this->mu_ != nullptr) { this->mu_->Unlock(); }
}
void Release() UNLOCK_FUNCTION();
void Release() ABSL_UNLOCK_FUNCTION();
private:
Mutex *mu_;
......
......@@ -425,10 +425,10 @@ TEST(Mutex, CondVarWaitSignalsAwait) {
// Use a struct so the lock annotations apply.
struct {
absl::Mutex barrier_mu;
bool barrier GUARDED_BY(barrier_mu) = false;
bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
absl::Mutex release_mu;
bool release GUARDED_BY(release_mu) = false;
bool release ABSL_GUARDED_BY(release_mu) = false;
absl::CondVar released_cv;
} state;
......@@ -466,10 +466,10 @@ TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) {
// Use a struct so the lock annotations apply.
struct {
absl::Mutex barrier_mu;
bool barrier GUARDED_BY(barrier_mu) = false;
bool barrier ABSL_GUARDED_BY(barrier_mu) = false;
absl::Mutex release_mu;
bool release GUARDED_BY(release_mu) = false;
bool release ABSL_GUARDED_BY(release_mu) = false;
absl::CondVar released_cv;
} state;
......@@ -770,7 +770,7 @@ static void GetReadLock(ReaderDecrementBugStruct *x) {
// Test for reader counter being decremented incorrectly by waiter
// with false condition.
TEST(Mutex, MutexReaderDecrementBug) NO_THREAD_SAFETY_ANALYSIS {
TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
ReaderDecrementBugStruct x;
x.cond = false;
x.waiting_on_cond = false;
......@@ -815,7 +815,7 @@ TEST(Mutex, MutexReaderDecrementBug) NO_THREAD_SAFETY_ANALYSIS {
// Test that we correctly handle the situation when a lock is
// held and then destroyed (w/o unlocking).
TEST(Mutex, LockedMutexDestructionBug) NO_THREAD_SAFETY_ANALYSIS {
TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
for (int i = 0; i != 10; i++) {
// Create, lock and destroy 10 locks.
const int kNumLocks = 10;
......@@ -1087,11 +1087,11 @@ TEST(Mutex, DeadlockDetectorBazelWarning) {
absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort);
}
// This test is tagged with NO_THREAD_SAFETY_ANALYSIS because the
// This test is tagged with ABSL_NO_THREAD_SAFETY_ANALYSIS because the
// annotation-based static thread-safety analysis is not currently
// predicate-aware and cannot tell if the two for-loops that acquire and
// release the locks have the same predicates.
TEST(Mutex, DeadlockDetectorStessTest) NO_THREAD_SAFETY_ANALYSIS {
TEST(Mutex, DeadlockDetectorStessTest) ABSL_NO_THREAD_SAFETY_ANALYSIS {
// Stress test: Here we create a large number of locks and use all of them.
// If a deadlock detector keeps a full graph of lock acquisition order,
// it will likely be too slow for this test to pass.
......@@ -1109,7 +1109,7 @@ TEST(Mutex, DeadlockDetectorStessTest) NO_THREAD_SAFETY_ANALYSIS {
}
}
TEST(Mutex, DeadlockIdBug) NO_THREAD_SAFETY_ANALYSIS {
TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS {
// Test a scenario where a cached deadlock graph node id in the
// list of held locks is not invalidated when the corresponding
// mutex is deleted.
......
......@@ -385,7 +385,7 @@ static uint64_t UpdateLastSample(
// TODO(absl-team): Remove this attribute when our compiler is smart enough
// to do the right thing.
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath() LOCKS_EXCLUDED(lock) {
static int64_t GetCurrentTimeNanosSlowPath() ABSL_LOCKS_EXCLUDED(lock) {
// Serialize access to slow-path. Fast-path readers are not blocked yet, and
// code below must not modify last_sample until the seqlock is acquired.
lock.Lock();
......@@ -430,7 +430,7 @@ static int64_t GetCurrentTimeNanosSlowPath() LOCKS_EXCLUDED(lock) {
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
uint64_t delta_cycles,
const struct TimeSample *sample)
EXCLUSIVE_LOCKS_REQUIRED(lock) {
ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock) {
uint64_t estimated_base_ns = now_ns;
uint64_t lock_value = SeqAcquire(&seq); // acquire seqlock to block readers
......
#!/bin/bash
# This script renames all the functions and the macros defined in
# absl/base/dynamic_annotations.{h,cc}.
# absl/base/dynamic_annotations.{h,cc} and absl/base/thread_annotations.h.
#
# Chromium's dynamic_annotations live in //base/third_party/dynamic_annotations
# and they are in conflict with Abseil's dynamic_annotations (ODR violations and
# macro clashes).
# and its //base contains a copy of thread_annotations.h, which conflicts with
# Abseil's versions (ODR violations and macro clashes).
# In order to avoid problems in Chromium, this copy of Abseil has its own
# dynamic_annotations renamed.
# dynamic_annotations and thread_annotations renamed.
# -------------------------- dynamic_annotations -------------------------
for w in \
AnnotateBarrierDestroy \
AnnotateBarrierInit \
......@@ -125,3 +126,40 @@ for w in \
; do
find absl/ -type f -exec sed -i "s/\b$w\b/ABSL_$w/g" {} \;
done
# -------------------------- thread_annotations -------------------------
for w in \
ts_unchecked_read \
; do
find absl/ -type f -exec sed -i "s/\b$w\b/absl_$w/g" {} \;
done
for w in \
THREAD_ANNOTATION_ATTRIBUTE__ \
GUARDED_BY \
PT_GUARDED_BY \
ACQUIRED_AFTER \
ACQUIRED_BEFORE \
EXCLUSIVE_LOCKS_REQUIRED \
SHARED_LOCKS_REQUIRED \
LOCKS_EXCLUDED \
LOCK_RETURNED \
LOCKABLE \
SCOPED_LOCKABLE \
EXCLUSIVE_LOCK_FUNCTION \
SHARED_LOCK_FUNCTION \
UNLOCK_FUNCTION \
EXCLUSIVE_TRYLOCK_FUNCTION \
SHARED_TRYLOCK_FUNCTION \
ASSERT_EXCLUSIVE_LOCK \
ASSERT_SHARED_LOCK \
NO_THREAD_SAFETY_ANALYSIS \
TS_UNCHECKED \
TS_FIXME \
NO_THREAD_SAFETY_ANALYSIS_FIXME \
GUARDED_BY_FIXME \
TS_UNCHECKED_READ \
; do
find absl/ -type f -exec sed -i "s/\b$w\b/ABSL_$w/g" {} \;
done
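
A note on the sed expressions above: the \b word boundaries do real
work here. Since `_` counts as a word character, the GUARDED_BY pass
cannot rewrite the tail of PT_GUARDED_BY (which gets its own pass),
and re-running the script does not stack ABSL_ABSL_ prefixes. A quick
check (GNU sed; the sample line is made up):

  echo 'PT_GUARDED_BY(mu) GUARDED_BY(mu) ABSL_GUARDED_BY(mu)' \
    | sed 's/\bGUARDED_BY\b/ABSL_GUARDED_BY/g'
  # prints: PT_GUARDED_BY(mu) ABSL_GUARDED_BY(mu) ABSL_GUARDED_BY(mu)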