Commit b14c4627 authored by Clemens Backes, committed by Commit Bot

[wasm] Rename an atomic counter and use relaxed ordering

The rename makes it clear that the atomic counter is an approximation
only. An explanation is added about how the counter is updated (it is
increased when units are added, and dropped to zero if a worker finds no
more units). The comment also says why it's safe to use relaxed memory
ordering in this case.
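
In isolation, the counter usage this describes looks roughly like the
following standalone sketch (invented names and a bare std::atomic, not
V8's actual classes):

```cpp
// Minimal sketch of an approximate counter that only ever uses relaxed
// operations. This is safe because the counter publishes no other data:
// it is a scheduling hint, re-validated against the real queue elsewhere.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<int> scheduled_units_approximation{0};

void AddUnits(int n) {
  // Producer side: bump the hint. No acquire/release pairing is needed,
  // since no reader infers the state of other memory from this value.
  scheduled_units_approximation.fetch_add(n, std::memory_order_relaxed);
}

int main() {
  std::vector<std::thread> producers;
  for (int i = 0; i < 4; ++i) producers.emplace_back(AddUnits, 10);
  for (auto& t : producers) t.join();
  // A worker that finds the real queue empty drops the hint to zero.
  scheduled_units_approximation.store(0, std::memory_order_relaxed);
  std::printf("hint after drop: %d\n",
              scheduled_units_approximation.load(std::memory_order_relaxed));
}
```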

R=thibaudm@chromium.org, ahaas@chromium.org

Bug: chromium:1101340
Change-Id: I307d646189bc5732d50c92a94b2a654fa6a7f763
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2410185
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69908}
parent d626121e
@@ -695,8 +695,17 @@ class CompilationStateImpl {
   std::atomic<bool> compile_failed_{false};
 
   // The atomic counter is shared with the compilation job. It's increased if
-  // more units are added, and decreased when the queue drops to zero.
-  std::shared_ptr<std::atomic<int>> current_compile_concurrency_ =
+  // more units are added, and decreased when the queue drops to zero. Hence
+  // it's an approximation of the current number of available units in the
+  // queue, but it's not updated after popping a single unit, because that
+  // would create too much contention.
+  // This counter is not used for synchronization, hence relaxed memory ordering
+  // can be used. The thread that increases the counter is the same that calls
+  // {NotifyConcurrencyIncrease} later. The only reduction of the counter is a
+  // drop to zero after a worker does not find any unit in the queue, and after
+  // that drop another check is executed to ensure that any left-over units are
+  // still processed.
+  std::shared_ptr<std::atomic<int>> scheduled_units_approximation_ =
       std::make_shared<std::atomic<int>>(0);
 
   const int max_compile_concurrency_ = 0;
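
The protocol described in the new comment — producers bump the counter, an
idle worker drops it to zero and then re-checks the real queue so late
additions are not lost — can be sketched standalone (hypothetical names; a
std::queue plus mutex stands in for V8's compilation unit queue):

```cpp
#include <atomic>
#include <mutex>
#include <queue>

std::mutex queue_mutex;
std::queue<int> unit_queue;          // the real, mutex-protected queue
std::atomic<int> approximation{0};   // relaxed-only hint of the queue size

// Producer: enqueue a unit and bump the hint.
void AddUnit(int unit) {
  {
    std::lock_guard<std::mutex> guard(queue_mutex);
    unit_queue.push(unit);
  }
  approximation.fetch_add(1, std::memory_order_relaxed);
}

// Worker that found no more work: drop the hint to zero first, then re-check
// the real queue under the mutex so units added in between are still seen.
void OnWorkerIdle() {
  approximation.store(0, std::memory_order_relaxed);
  std::lock_guard<std::mutex> guard(queue_mutex);
  if (!unit_queue.empty()) {
    // Left-over units: restore the hint (a real scheduler would also be
    // re-notified about the increased concurrency here).
    approximation.fetch_add(static_cast<int>(unit_queue.size()),
                            std::memory_order_relaxed);
  }
}

int main() {
  AddUnit(1);
  OnWorkerIdle();  // finds the queue non-empty and restores the hint
}
```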
@@ -1627,11 +1636,12 @@ class BackgroundCompileJob : public JobTask {
   explicit BackgroundCompileJob(
       std::shared_ptr<BackgroundCompileToken> token,
       std::shared_ptr<Counters> async_counters,
-      std::shared_ptr<std::atomic<int>> current_concurrency,
+      std::shared_ptr<std::atomic<int>> scheduled_units_approximation,
       size_t max_concurrency)
       : token_(std::move(token)),
         async_counters_(std::move(async_counters)),
-        current_concurrency_(std::move(current_concurrency)),
+        scheduled_units_approximation_(
+            std::move(scheduled_units_approximation)),
         max_concurrency_(max_concurrency) {}
 
   void Run(JobDelegate* delegate) override {
@@ -1639,11 +1649,12 @@ class BackgroundCompileJob : public JobTask {
                                 kBaselineOrTopTier) == kYield) {
       return;
     }
-    // Otherwise we didn't find any more units to execute. Reduce the available
-    // concurrency to zero, but then check whether any more units were added in
-    // the meantime, and increase back if necessary.
-    current_concurrency_->store(0);
+    // Otherwise we didn't find any more units to execute. Reduce the atomic
+    // counter of the approximated number of available units to zero, but then
+    // check whether any more units were added in the meantime, and increase
+    // back if necessary.
+    scheduled_units_approximation_->store(0, std::memory_order_relaxed);
     {
       BackgroundCompileScope scope(token_);
       if (scope.cancelled()) return;
       size_t outstanding_units =
@@ -1655,19 +1666,18 @@ class BackgroundCompileJob : public JobTask {
       scope.compilation_state()->ScheduleCompileJobForNewUnits(
           static_cast<int>(outstanding_units));
     }
   }
 
   size_t GetMaxConcurrency(size_t worker_count) const override {
     // {current_concurrency_} does not reflect the units that running workers
     // are processing, thus add the current worker count to that number.
     return std::min(max_concurrency_,
-                    worker_count + current_concurrency_->load());
+                    worker_count + scheduled_units_approximation_->load());
   }
 
  private:
   const std::shared_ptr<BackgroundCompileToken> token_;
   const std::shared_ptr<Counters> async_counters_;
-  const std::shared_ptr<std::atomic<int>> current_concurrency_;
+  const std::shared_ptr<std::atomic<int>> scheduled_units_approximation_;
   const size_t max_concurrency_;
 };
@@ -3205,12 +3215,13 @@ void CompilationStateImpl::PublishDetectedFeatures(Isolate* isolate) {
 }
 
 void CompilationStateImpl::ScheduleCompileJobForNewUnits(int new_units) {
-  // Increase the {current_compile_concurrency_} counter and remember the old
+  // Increase the {scheduled_units_approximation_} counter and remember the old
   // value to check whether it increased towards {max_compile_concurrency_}.
   // In that case, we need to notify the compile job about the increased
   // concurrency.
   DCHECK_LT(0, new_units);
-  int old_units = current_compile_concurrency_->fetch_add(new_units);
+  int old_units = scheduled_units_approximation_->fetch_add(
+      new_units, std::memory_order_relaxed);
   bool concurrency_increased = old_units < max_compile_concurrency_;
 
   base::MutexGuard guard(&mutex_);
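
The notification decision in this hunk relies on fetch_add returning the
value before the addition; reduced to its core (hypothetical names):

```cpp
#include <atomic>

// Returns true when the job should be notified that concurrency can grow:
// fetch_add yields the counter's previous value, so old_units tells us
// whether we were still below the maximum before adding the new units.
bool ShouldNotifyScheduler(std::atomic<int>& approximation, int new_units,
                           int max_concurrency) {
  int old_units =
      approximation.fetch_add(new_units, std::memory_order_relaxed);
  return old_units < max_concurrency;
}

int main() {
  std::atomic<int> hint{0};
  bool notify = ShouldNotifyScheduler(hint, 4, 8);  // old value 0 < 8 -> true
  return notify ? 0 : 1;
}
```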
@@ -3225,7 +3236,7 @@ void CompilationStateImpl::ScheduleCompileJobForNewUnits(int new_units) {
     std::unique_ptr<JobTask> new_compile_job =
         std::make_unique<BackgroundCompileJob>(
             background_compile_token_, async_counters_,
-            current_compile_concurrency_, max_compile_concurrency_);
+            scheduled_units_approximation_, max_compile_concurrency_);
     // TODO(wasm): Lower priority for TurboFan-only jobs.
     std::shared_ptr<JobHandle> handle = V8::GetCurrentPlatform()->PostJob(
         TaskPriority::kUserVisible, std::move(new_compile_job));