Commit 2a67d299 authored by Etienne Pierre-doray, committed by Commit Bot

[Jobs]: Fix AcquireTaskIds memory fences.

Relaxed ordering is insufficient since the resulting task id is used by
the caller to order operations. A test case written by omerkatz@
demonstrates the race:

TEST_F(ConcurrentMarkingTest, JobTaskIds) {
  static constexpr size_t kNumTasksToRun = 2;
  static size_t tmp[kNumTasksToRun] = { 0 };
  base::JobHandle handle = base::PostJob(
      FROM_HERE, {base::ThreadPool(), base::TaskPriority::USER_VISIBLE},
      ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
          [](base::JobDelegate* job) {
            uint8_t id = job->GetTaskId();
            size_t& slot = tmp[id];
            slot++;
          })),
      ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
          [](size_t) { return kNumTasksToRun; })));
  handle.Join();
}

Thread 1 can release its task id and thread 2 can immediately pick it
up, which causes a data race on tmp when both threads access the same
index with no memory fence in between. Using acquire/release ordering
in AcquireTaskId()/ReleaseTaskId() ensures that operations performed by
different threads holding the same task id are properly sequenced.
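
For illustration, here is a minimal standalone sketch of the same idea:
a hypothetical, simplified id pool (not the actual Chromium
JobTaskSource; it assumes C++20 and uses std::countr_one in place of
bits::CountTrailingZeroBits). The acquire on the successful
compare-exchange pairs with the release on the fetch_and, so everything
the previous holder of an id did happens-before what the next holder of
that id does.

#include <atomic>
#include <bit>
#include <cassert>
#include <cstdint>
#include <thread>
#include <vector>

// Hypothetical, simplified stand-in for the bitmap of assigned task ids.
class TaskIdPool {
 public:
  // Returns the lowest free id. memory_order_acquire on the successful
  // compare-exchange pairs with the release in ReleaseTaskId(), so writes
  // made by the previous holder of this id are visible to the new holder.
  uint8_t AcquireTaskId() {
    uint32_t assigned = assigned_task_ids_.load(std::memory_order_relaxed);
    uint32_t new_assigned = 0;
    uint8_t id = 0;
    do {
      // Count trailing one bits: the index of the right-most 0-bit, i.e.
      // the lowest id not currently assigned.
      id = static_cast<uint8_t>(std::countr_one(assigned));
      new_assigned = assigned | (uint32_t{1} << id);
    } while (!assigned_task_ids_.compare_exchange_weak(
        assigned, new_assigned, std::memory_order_acquire,
        std::memory_order_relaxed));
    return id;
  }

  // memory_order_release publishes everything done while holding |id| to
  // whichever thread acquires that id next.
  void ReleaseTaskId(uint8_t id) {
    uint32_t previous = assigned_task_ids_.fetch_and(
        ~(uint32_t{1} << id), std::memory_order_release);
    assert(previous & (uint32_t{1} << id));
  }

 private:
  std::atomic<uint32_t> assigned_task_ids_{0};
};

int main() {
  constexpr int kThreads = 4;
  constexpr int kIterations = 1000;
  TaskIdPool pool;
  // At most kThreads ids are in flight, so the plain (unsynchronized)
  // writes to slots[id] are safe only because of the acquire/release
  // pairing above.
  size_t slots[kThreads] = {};
  std::vector<std::thread> threads;
  for (int t = 0; t < kThreads; ++t) {
    threads.emplace_back([&] {
      for (int i = 0; i < kIterations; ++i) {
        uint8_t id = pool.AcquireTaskId();
        slots[id]++;
        pool.ReleaseTaskId(id);
      }
    });
  }
  for (auto& thread : threads)
    thread.join();
  size_t total = 0;
  for (size_t s : slots)
    total += s;
  assert(total == static_cast<size_t>(kThreads) * kIterations);
  return 0;
}

Acquire/release is enough here because the only ordering that matters is
between successive holders of the same id; no total order across
different ids is required.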

Bug: 1119518
Change-Id: Ifa35ca03e3663f53f76ee830bcaa8654bc48838c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2387554
Reviewed-by: François Doray <fdoray@chromium.org>
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Cr-Commit-Position: refs/heads/master@{#804082}
parent d95a7db4
@@ -5,6 +5,8 @@
#include "base/task/post_job.h"
#include <atomic>
#include <iterator>
#include <numeric>
#include "base/task/test_task_traits_extension.h"
#include "base/test/bind_test_util.h"
@@ -37,4 +39,29 @@ TEST(PostJobTest, PostJobExtension) {
  });
}

// Verify that concurrent accesses with task_id as the only form of
// synchronisation don't trigger a race.
TEST(PostJobTest, TaskIds) {
  static constexpr size_t kNumConcurrentThreads = 2;
  static constexpr size_t kNumTasksToRun = 1000;
  base::test::TaskEnvironment task_environment;
  size_t concurrent_array[kNumConcurrentThreads] = {0};
  std::atomic_size_t remaining_tasks{kNumTasksToRun};
  base::JobHandle handle = base::PostJob(
      FROM_HERE, {base::ThreadPool()},
      BindLambdaForTesting([&](base::JobDelegate* job) {
        uint8_t id = job->GetTaskId();
        size_t& slot = concurrent_array[id];
        slot++;
        --remaining_tasks;
      }),
      BindLambdaForTesting([&remaining_tasks](size_t) {
        return std::min(remaining_tasks.load(), kNumConcurrentThreads);
      }));
  handle.Join();
  EXPECT_EQ(kNumTasksToRun, std::accumulate(std::begin(concurrent_array),
                                            std::end(concurrent_array), 0U));
}
} // namespace base
@@ -289,19 +289,24 @@ uint8_t JobTaskSource::AcquireTaskId() {
assigned_task_ids_.load(std::memory_order_relaxed);
uint32_t new_assigned_task_ids = 0;
uint8_t task_id = 0;
// memory_order_acquire on success, matched with memory_order_release in
// ReleaseTaskId() so that operations done by previous threads that had
// the same task_id become visible to the current thread.
do {
// Count trailing one bits. This is the id of the right-most 0-bit in
// |assigned_task_ids|.
task_id = bits::CountTrailingZeroBits(~assigned_task_ids);
new_assigned_task_ids = assigned_task_ids | (uint32_t(1) << task_id);
} while (!assigned_task_ids_.compare_exchange_weak(
assigned_task_ids, new_assigned_task_ids, std::memory_order_relaxed));
assigned_task_ids, new_assigned_task_ids, std::memory_order_acquire,
std::memory_order_relaxed));
return task_id;
}
void JobTaskSource::ReleaseTaskId(uint8_t task_id) {
uint32_t previous_task_ids =
assigned_task_ids_.fetch_and(~(uint32_t(1) << task_id));
// memory_order_release to match AcquireTaskId().
uint32_t previous_task_ids = assigned_task_ids_.fetch_and(
~(uint32_t(1) << task_id), std::memory_order_release);
DCHECK(previous_task_ids & (uint32_t(1) << task_id));
}