Commit 7b42cd60 authored by Danil Chapovalov, committed by Chromium LUCI CQ

Roll abseil_revision 322ae2420d..b2dcbba183

Change Log:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+log/322ae2420d..b2dcbba183
Full diff:
https://chromium.googlesource.com/external/github.com/abseil/abseil-cpp/+/322ae2420d..b2dcbba183

Bug: None
Change-Id: I7f4a4235d5eb8b57bfbdb0e07039f975c76b7e6c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2640441
Reviewed-by: Mirko Bonadei <mbonadei@chromium.org>
Commit-Queue: Danil Chapovalov <danilchap@chromium.org>
Cr-Commit-Position: refs/heads/master@{#845631}
parent 5f1eb180
......@@ -4,7 +4,7 @@ URL: https://github.com/abseil/abseil-cpp
License: Apache 2.0
License File: LICENSE
Version: 0
Revision: 322ae2420d27fc96d0a8ab1167d7de33671048df
Revision: b2dcbba18341d75f3fef486b717585cefda0195d
Security Critical: yes
Description:
......
......@@ -125,8 +125,9 @@ void SpinLock::SlowLock() {
// it as having a sleeper.
if ((lock_value & kWaitTimeMask) == 0) {
// Here, just "mark" that the thread is going to sleep. Don't store the
// lock wait time in the lock as that will cause the current lock
// owner to think it experienced contention.
// lock wait time in the lock -- the lock word stores the amount of time
// that the current holder waited before acquiring the lock, not the wait
// time of any thread currently waiting to acquire it.
if (lockword_.compare_exchange_strong(
lock_value, lock_value | kSpinLockSleeper,
std::memory_order_relaxed, std::memory_order_relaxed)) {
......@@ -140,6 +141,14 @@ void SpinLock::SlowLock() {
// this thread obtains the lock.
lock_value = TryLockInternal(lock_value, wait_cycles);
continue; // Skip the delay at the end of the loop.
} else if ((lock_value & kWaitTimeMask) == 0) {
// The lock is still held, without a waiter being marked, but something
// else about the lock word changed, causing our CAS to fail. For
// example, a new lock holder may have acquired the lock with
// kSpinLockDisabledScheduling set, whereas the previous holder had not
// set that flag. In this case, attempt again to mark ourselves as a
// waiter.
continue;
}
}
......
......@@ -137,8 +137,20 @@ class ABSL_LOCKABLE SpinLock {
//
// bit[0] encodes whether a lock is being held.
// bit[1] encodes whether a lock uses cooperative scheduling.
// bit[2] encodes whether a lock disables scheduling.
// bit[2] encodes whether the current lock holder disabled scheduling when
// acquiring the lock. Only set when kSpinLockHeld is also set.
// bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
// This is set by the lock holder to indicate how long it waited on
// the lock before eventually acquiring it. The number of cycles is
// encoded as a 29-bit unsigned int, or in the case that the current
// holder did not wait but another waiter is queued, the LSB
// (kSpinLockSleeper) is set. The implementation does not explicitly
// track the number of queued waiters beyond this. It must always be
// assumed that waiters may exist if the current holder was required to
// queue.
//
// Invariant: if the lock is not held, the value is either 0 or
// kSpinLockCooperative.
static constexpr uint32_t kSpinLockHeld = 1;
static constexpr uint32_t kSpinLockCooperative = 2;
static constexpr uint32_t kSpinLockDisabledScheduling = 4;
......
......@@ -208,9 +208,9 @@ static CordRep* NewTree(const char* data,
size_t n = 0;
do {
const size_t len = std::min(length, kMaxFlatLength);
CordRep* rep = CordRepFlat::New(len + alloc_hint);
CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
rep->length = len;
memcpy(rep->data, data, len);
memcpy(rep->Data(), data, len);
reps[n++] = VerifyTree(rep);
data += len;
length -= len;
......@@ -272,10 +272,10 @@ inline CordRep* Cord::InlineRep::force_tree(size_t extra_hint) {
return data_.as_tree.rep;
}
CordRep* result = CordRepFlat::New(len + extra_hint);
CordRepFlat* result = CordRepFlat::New(len + extra_hint);
result->length = len;
static_assert(kMinFlatLength >= sizeof(data_.as_chars), "");
memcpy(result->data, data_.as_chars, sizeof(data_.as_chars));
memcpy(result->Data(), data_.as_chars, sizeof(data_.as_chars));
set_tree(result);
return result;
}
......@@ -349,7 +349,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
}
dst->length += size_increase;
*region = dst->data + in_use;
*region = dst->flat()->Data() + in_use;
*size = size_increase;
return true;
}
......@@ -381,7 +381,7 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
CordRepFlat* new_node =
CordRepFlat::New(std::max(static_cast<size_t>(root->length), max_length));
new_node->length = std::min(new_node->Capacity(), max_length);
*region = new_node->data;
*region = new_node->Data();
*size = new_node->length;
replace_tree(Concat(root, new_node));
}
......@@ -407,7 +407,7 @@ void Cord::InlineRep::GetAppendRegion(char** region, size_t* size) {
// Allocate new node.
CordRepFlat* new_node = CordRepFlat::New(root->length);
new_node->length = new_node->Capacity();
*region = new_node->data;
*region = new_node->Data();
*size = new_node->length;
replace_tree(Concat(root, new_node));
}
......@@ -523,7 +523,7 @@ Cord& Cord::operator=(absl::string_view src) {
tree->flat()->Capacity() >= length &&
tree->refcount.IsOne()) {
// Copy in place if the existing FLAT node is reusable.
memmove(tree->data, data, length);
memmove(tree->flat()->Data(), data, length);
tree->length = length;
VerifyTree(tree);
return *this;
......@@ -578,8 +578,8 @@ void Cord::InlineRep::AppendArray(const char* src_data, size_t src_size) {
root = CordRepFlat::New(std::max<size_t>(size1, size2));
appended = std::min(
src_size, root->flat()->Capacity() - inline_length);
memcpy(root->data, data_.as_chars, inline_length);
memcpy(root->data + inline_length, src_data, appended);
memcpy(root->flat()->Data(), data_.as_chars, inline_length);
memcpy(root->flat()->Data() + inline_length, src_data, appended);
root->length = inline_length + appended;
set_tree(root);
}
......@@ -635,7 +635,7 @@ inline void Cord::AppendImpl(C&& src) {
}
if (src_tree->tag >= FLAT) {
// src tree just has one flat node.
contents_.AppendArray(src_tree->data, src_size);
contents_.AppendArray(src_tree->flat()->Data(), src_size);
return;
}
if (&src == this) {
......@@ -1093,7 +1093,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
CordRep* node = tree();
if (node->tag >= FLAT) {
return absl::string_view(node->data, node->length);
return absl::string_view(node->flat()->Data(), node->length);
}
if (node->tag == EXTERNAL) {
......@@ -1116,7 +1116,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
}
if (node->tag >= FLAT) {
return absl::string_view(node->data + offset, length);
return absl::string_view(node->flat()->Data() + offset, length);
}
assert((node->tag == EXTERNAL) && "Expect FLAT or EXTERNAL node here");
......@@ -1329,7 +1329,7 @@ Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
assert(node->tag == EXTERNAL || node->tag >= FLAT);
assert(length != 0);
const char* data =
node->tag == EXTERNAL ? node->external()->base : node->data;
node->tag == EXTERNAL ? node->external()->base : node->flat()->Data();
current_chunk_ = absl::string_view(data + offset, length);
current_leaf_ = node;
return *this;
......@@ -1362,8 +1362,8 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
// Range to read is a proper subrange of the current chunk.
assert(current_leaf_ != nullptr);
CordRep* subnode = CordRep::Ref(current_leaf_);
const char* data =
subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
const char* data = subnode->tag == EXTERNAL ? subnode->external()->base
: subnode->flat()->Data();
subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
subcord.contents_.set_tree(VerifyTree(subnode));
RemoveChunkPrefix(n);
......@@ -1375,8 +1375,8 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
assert(current_leaf_ != nullptr);
CordRep* subnode = CordRep::Ref(current_leaf_);
if (current_chunk_.size() < subnode->length) {
const char* data =
subnode->tag == EXTERNAL ? subnode->external()->base : subnode->data;
const char* data = subnode->tag == EXTERNAL ? subnode->external()->base
: subnode->flat()->Data();
subnode = NewSubstring(subnode, current_chunk_.data() - data,
current_chunk_.size());
}
......@@ -1444,7 +1444,7 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
}
const char* data =
node->tag == EXTERNAL ? node->external()->base : node->data;
node->tag == EXTERNAL ? node->external()->base : node->flat()->Data();
current_chunk_ = absl::string_view(data + offset + n, length - n);
current_leaf_ = node;
bytes_remaining_ -= n;
......@@ -1511,7 +1511,7 @@ void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
assert(node->tag == EXTERNAL || node->tag >= FLAT);
assert(length > n);
const char* data =
node->tag == EXTERNAL ? node->external()->base : node->data;
node->tag == EXTERNAL ? node->external()->base : node->flat()->Data();
current_chunk_ = absl::string_view(data + offset + n, length - n);
current_leaf_ = node;
bytes_remaining_ -= n;
......@@ -1529,7 +1529,7 @@ char Cord::operator[](size_t i) const {
assert(offset < rep->length);
if (rep->tag >= FLAT) {
// Get the "i"th character directly from the flat array.
return rep->data[offset];
return rep->flat()->Data()[offset];
} else if (rep->tag == EXTERNAL) {
// Get the "i"th character from the external array.
return rep->external()->base[offset];
......@@ -1562,7 +1562,7 @@ absl::string_view Cord::FlattenSlowPath() {
if (total_size <= kMaxFlatLength) {
new_rep = CordRepFlat::New(total_size);
new_rep->length = total_size;
new_buffer = new_rep->data;
new_buffer = new_rep->flat()->Data();
CopyToArraySlowPath(new_buffer);
} else {
new_buffer = std::allocator<char>().allocate(total_size);
......@@ -1583,7 +1583,7 @@ absl::string_view Cord::FlattenSlowPath() {
/* static */ bool Cord::GetFlatAux(CordRep* rep, absl::string_view* fragment) {
assert(rep != nullptr);
if (rep->tag >= FLAT) {
*fragment = absl::string_view(rep->data, rep->length);
*fragment = absl::string_view(rep->flat()->Data(), rep->length);
return true;
} else if (rep->tag == EXTERNAL) {
*fragment = absl::string_view(rep->external()->base, rep->length);
......@@ -1591,8 +1591,8 @@ absl::string_view Cord::FlattenSlowPath() {
} else if (rep->tag == SUBSTRING) {
CordRep* child = rep->substring()->child;
if (child->tag >= FLAT) {
*fragment =
absl::string_view(child->data + rep->substring()->start, rep->length);
*fragment = absl::string_view(
child->flat()->Data() + rep->substring()->start, rep->length);
return true;
} else if (child->tag == EXTERNAL) {
*fragment = absl::string_view(
......@@ -1680,7 +1680,7 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os) {
*os << "FLAT cap=" << rep->flat()->Capacity()
<< " [";
if (include_data)
*os << absl::CEscape(std::string(rep->data, rep->length));
*os << absl::CEscape(std::string(rep->flat()->Data(), rep->length));
*os << "]\n";
}
if (stack.empty()) break;
......
......@@ -166,7 +166,7 @@ enum CordRepKind {
struct CordRep {
CordRep() = default;
constexpr CordRep(Refcount::Immortal immortal, size_t l)
: length(l), refcount(immortal), tag(EXTERNAL), data{} {}
: length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
// The following three fields have to be less than 32 bytes since
// that is the smallest supported flat node size.
......@@ -175,7 +175,7 @@ struct CordRep {
// If tag < FLAT, it represents CordRepKind and indicates the type of node.
// Otherwise, the node type is CordRepFlat and the tag is the encoded size.
uint8_t tag;
char data[1]; // Starting point for flat array: MUST BE LAST FIELD of CordRep
char storage[1]; // Starting point for flat array: MUST BE LAST FIELD
inline CordRepConcat* concat();
inline const CordRepConcat* concat() const;
......@@ -219,8 +219,8 @@ struct CordRepConcat : public CordRep {
CordRep* left;
CordRep* right;
uint8_t depth() const { return static_cast<uint8_t>(data[0]); }
void set_depth(uint8_t depth) { data[0] = static_cast<char>(depth); }
uint8_t depth() const { return static_cast<uint8_t>(storage[0]); }
void set_depth(uint8_t depth) { storage[0] = static_cast<char>(depth); }
};
struct CordRepSubstring : public CordRep {
......
......@@ -37,7 +37,7 @@ namespace cord_internal {
// ideally a 'nice' size aligning with allocation and cacheline sizes like 32.
// kMaxFlatSize is bounded by the size resulting in a computed tag no greater
// than MAX_FLAT_TAG. MAX_FLAT_TAG provides for additional 'high' tag values.
static constexpr size_t kFlatOverhead = offsetof(CordRep, data);
static constexpr size_t kFlatOverhead = offsetof(CordRep, storage);
static constexpr size_t kMinFlatSize = 32;
static constexpr size_t kMaxFlatSize = 4096;
static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
......@@ -115,6 +115,9 @@ struct CordRepFlat : public CordRep {
#endif
}
char* Data() { return storage; }
const char* Data() const { return storage; }
// Returns the maximum capacity (payload size) of this instance.
size_t Capacity() const { return TagToLength(tag); }
......
This diff is collapsed.
......@@ -981,6 +981,7 @@ EXPORTS
??0Time@absl@@QEAA@XZ
??0TimeConversion@absl@@QEAA@XZ
??0TimeInfo@TimeZone@absl@@QEAA@XZ
??0TimeSample@absl@@QEAA@XZ
??0TimeZone@absl@@QEAA@Vtime_zone@cctz@time_internal@1@@Z
??0TimeZoneIf@cctz@time_internal@absl@@IEAA@XZ
??0TimeZoneInfo@cctz@time_internal@absl@@QEAA@XZ
......@@ -1512,6 +1513,8 @@ EXPORTS
?Crash@Helper@internal_statusor@absl@@SAXAEBVStatus@3@@Z
?CreateThreadIdentity@synchronization_internal@absl@@YAPEAUThreadIdentity@base_internal@2@XZ
?CurrentThreadIdentityIfPresent@base_internal@absl@@YAPEAUThreadIdentity@12@XZ
?Data@CordRepFlat@cord_internal@absl@@QEAAPEADXZ
?Data@CordRepFlat@cord_internal@absl@@QEBAPEBDXZ
?DataLength@Header@TimeZoneInfo@cctz@time_internal@absl@@QEBA_K_K@Z
?DataLossError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?DeadlineExceededError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
......
......@@ -983,6 +983,7 @@ EXPORTS
??0Time@absl@@QEAA@XZ
??0TimeConversion@absl@@QEAA@XZ
??0TimeInfo@TimeZone@absl@@QEAA@XZ
??0TimeSample@absl@@QEAA@XZ
??0TimeZone@absl@@QEAA@Vtime_zone@cctz@time_internal@1@@Z
??0TimeZoneIf@cctz@time_internal@absl@@IEAA@XZ
??0TimeZoneInfo@cctz@time_internal@absl@@QEAA@XZ
......@@ -1513,6 +1514,8 @@ EXPORTS
?Crash@Helper@internal_statusor@absl@@SAXAEBVStatus@3@@Z
?CreateThreadIdentity@synchronization_internal@absl@@YAPEAUThreadIdentity@base_internal@2@XZ
?CurrentThreadIdentityIfPresent@base_internal@absl@@YAPEAUThreadIdentity@12@XZ
?Data@CordRepFlat@cord_internal@absl@@QEAAPEADXZ
?Data@CordRepFlat@cord_internal@absl@@QEBAPEBDXZ
?DataLength@Header@TimeZoneInfo@cctz@time_internal@absl@@QEBA_K_K@Z
?DataLossError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?DeadlineExceededError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
......
......@@ -981,6 +981,7 @@ EXPORTS
??0Time@absl@@QAE@XZ
??0TimeConversion@absl@@QAE@XZ
??0TimeInfo@TimeZone@absl@@QAE@XZ
??0TimeSample@absl@@QAE@XZ
??0TimeZone@absl@@QAE@Vtime_zone@cctz@time_internal@1@@Z
??0TimeZoneIf@cctz@time_internal@absl@@IAE@XZ
??0TimeZoneInfo@cctz@time_internal@absl@@QAE@XZ
......@@ -1510,6 +1511,8 @@ EXPORTS
?Crash@Helper@internal_statusor@absl@@SAXABVStatus@3@@Z
?CreateThreadIdentity@synchronization_internal@absl@@YAPAUThreadIdentity@base_internal@2@XZ
?CurrentThreadIdentityIfPresent@base_internal@absl@@YAPAUThreadIdentity@12@XZ
?Data@CordRepFlat@cord_internal@absl@@QAEPADXZ
?Data@CordRepFlat@cord_internal@absl@@QBEPBDXZ
?DataLength@Header@TimeZoneInfo@cctz@time_internal@absl@@QBEII@Z
?DataLossError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
?DeadlineExceededError@absl@@YA?AVStatus@1@Vstring_view@1@@Z
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment