Commit be45cd8e authored by Omer Katz, committed by Commit Bot

cppgc: Add concurrent marking tests

Bug: chromium:1056170
Change-Id: I2f62c74c3e435e05fd9e313af2f15925583872ab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2423716
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70153}
parent 9329f558
@@ -45,6 +45,8 @@ class HeapGrowing::HeapGrowingImpl final
size_t limit_for_atomic_gc() const { return limit_for_atomic_gc_; }
size_t limit_for_incremental_gc() const { return limit_for_incremental_gc_; }
void DisableForTesting();
private:
void ConfigureLimit(size_t allocated_object_size);
@@ -56,6 +58,8 @@ class HeapGrowing::HeapGrowingImpl final
size_t limit_for_incremental_gc_ = 0; // See ConfigureLimit().
SingleThreadedHandle gc_task_handle_;
bool disabled_for_testing_ = false;
};
HeapGrowing::HeapGrowingImpl::HeapGrowingImpl(
@@ -77,6 +81,7 @@ HeapGrowing::HeapGrowingImpl::~HeapGrowingImpl() {
}
void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
if (disabled_for_testing_) return;
size_t allocated_object_size = stats_collector_->allocated_object_size();
if (allocated_object_size > limit_for_atomic_gc_) {
collector_->CollectGarbage(
@@ -122,6 +127,10 @@ void HeapGrowing::HeapGrowingImpl::ConfigureLimit(
limit_incremental_gc_based_on_allocation_rate));
}
void HeapGrowing::HeapGrowingImpl::DisableForTesting() {
disabled_for_testing_ = true;
}
HeapGrowing::HeapGrowing(GarbageCollector* collector,
StatsCollector* stats_collector,
cppgc::Heap::ResourceConstraints constraints)
@@ -137,6 +146,8 @@ size_t HeapGrowing::limit_for_incremental_gc() const {
return impl_->limit_for_incremental_gc();
}
void HeapGrowing::DisableForTesting() { impl_->DisableForTesting(); }
// static
constexpr double HeapGrowing::kGrowingFactor;
......
@@ -43,6 +43,8 @@ class V8_EXPORT_PRIVATE HeapGrowing final {
size_t limit_for_atomic_gc() const;
size_t limit_for_incremental_gc() const;
void DisableForTesting();
private:
class HeapGrowingImpl;
std::unique_ptr<HeapGrowingImpl> impl_;
......
@@ -170,5 +170,7 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
gc_in_progress_ = false;
}
void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
} // namespace internal
} // namespace cppgc
@@ -38,6 +38,8 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
size_t epoch() const final { return epoch_; }
void DisableHeapGrowingForTesting();
private:
void StartGarbageCollection(Config);
void FinalizeGarbageCollection(Config::StackState);
......
@@ -319,10 +319,14 @@ bool MarkerBase::AdvanceMarkingWithMaxDuration(
}
bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
size_t step_size_in_bytes = GetNextIncrementalStepDuration(schedule_, heap_);
bool is_done = ProcessWorklistsWithDeadline(
mutator_marking_state_.marked_bytes() + step_size_in_bytes,
v8::base::TimeTicks::Now() + max_duration);
bool is_done = false;
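// When incremental marking is disabled for testing, skip mutator-side
// worklist processing; marking work is then left to the concurrent markers.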
if (!incremental_marking_disabled_for_testing_) {
size_t step_size_in_bytes =
GetNextIncrementalStepDuration(schedule_, heap_);
is_done = ProcessWorklistsWithDeadline(
mutator_marking_state_.marked_bytes() + step_size_in_bytes,
v8::base::TimeTicks::Now() + max_duration);
}
schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
if (!is_done) {
// If marking is atomic, |is_done| should always be true.
@@ -395,6 +399,10 @@ void MarkerBase::ClearAllWorklistsForTesting() {
marking_worklists_.ClearForTesting();
}
void MarkerBase::DisableIncrementalMarkingForTesting() {
incremental_marking_disabled_for_testing_ = true;
}
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
......
@@ -116,6 +116,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
Handle handle_;
};
void DisableIncrementalMarkingForTesting();
protected:
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@@ -165,6 +167,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
bool incremental_marking_disabled_for_testing_{false};
friend class MarkerFactory;
};
......
@@ -80,6 +80,7 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
"heap/cppgc/concurrent-marking-unittest.cc",
"heap/cppgc/concurrent-sweeper-unittest.cc",
"heap/cppgc/custom-spaces-unittest.cc",
"heap/cppgc/finalizer-trait-unittest.cc",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "include/cppgc/allocation.h"
#include "include/cppgc/default-platform.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/stats-collector.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
namespace internal {
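// These tests provoke data races between the mutator and the concurrent
// markers and are therefore only compiled for ThreadSanitizer builds, which
// can detect such races.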
#if defined(THREAD_SANITIZER)
namespace {
class GCed : public GarbageCollected<GCed> {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCed> child_;
};
class GCedWithCallback : public GarbageCollected<GCedWithCallback> {
public:
template <typename Callback>
explicit GCedWithCallback(Callback callback) {
callback(this);
}
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<GCedWithCallback> child_;
};
class Mixin : public GarbageCollectedMixin {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }
Member<Mixin> child_;
};
class GCedWithMixin : public GarbageCollected<GCedWithMixin>, public Mixin {
public:
void Trace(cppgc::Visitor* visitor) const { Mixin::Trace(visitor); }
};
template <typename T>
class GCedHolder : public GarbageCollected<GCedHolder<T>> {
public:
void Trace(cppgc::Visitor* visitor) const { visitor->Trace(object_); }
Member<T> object_;
};
class ConcurrentMarkingTest : public testing::TestWithHeap {
public:
using Config = Heap::Config;
static constexpr Config ConcurrentPreciseConfig = {
Config::CollectionType::kMajor, Config::StackState::kNoHeapPointers,
Config::MarkingType::kIncrementalAndConcurrent,
Config::SweepingType::kIncrementalAndConcurrent};
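// Starts incremental + concurrent marking. Heap growing is disabled so that
// allocations in the test do not trigger further GCs, and mutator-side
// incremental marking is disabled so that worklist processing is left to the
// concurrent markers.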
void StartConcurrentGC() {
Heap* heap = Heap::From(GetHeap());
heap->DisableHeapGrowingForTesting();
heap->StartIncrementalGarbageCollection(ConcurrentPreciseConfig);
heap->marker()->DisableIncrementalMarkingForTesting();
}
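// Performs a single incremental marking step on the mutator thread; the
// tests below call this mainly to re-post the concurrent marking job.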
bool SingleStep(Config::StackState stack_state) {
MarkerBase* marker = Heap::From(GetHeap())->marker();
DCHECK(marker);
return marker->IncrementalMarkingStepForTesting(stack_state);
}
void FinishSteps(Config::StackState stack_state) {
while (!SingleStep(stack_state)) {
}
}
void FinishGC() {
Heap::From(GetHeap())->FinalizeIncrementalGarbageCollectionIfRunning(
ConcurrentPreciseConfig);
}
};
// static
constexpr ConcurrentMarkingTest::Config
ConcurrentMarkingTest::ConcurrentPreciseConfig;
} // namespace
// The following tests check for data races during concurrent marking.
TEST_F(ConcurrentMarkingTest, MarkingObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCed>> root =
MakeGarbageCollected<GCedHolder<GCed>>(GetAllocationHandle());
Member<GCed>* last_object = &root->object_;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCed>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
// Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
}
TEST_F(ConcurrentMarkingTest, MarkingInConstructionObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<GCedWithCallback>> root =
MakeGarbageCollected<GCedHolder<GCedWithCallback>>(GetAllocationHandle());
Member<GCedWithCallback>* last_object = &root->object_;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
MakeGarbageCollected<GCedWithCallback>(
GetAllocationHandle(), [&last_object](GCedWithCallback* obj) {
*last_object = obj;
last_object = &(*last_object)->child_;
});
}
// Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
}
TEST_F(ConcurrentMarkingTest, MarkingMixinObjects) {
static constexpr int kNumStep = 1000;
StartConcurrentGC();
Persistent<GCedHolder<Mixin>> root =
MakeGarbageCollected<GCedHolder<Mixin>>(GetAllocationHandle());
Member<Mixin>* last_object = &root->object_;
for (int i = 0; i < kNumStep; ++i) {
for (int j = 0; j < kNumStep; ++j) {
*last_object = MakeGarbageCollected<GCedWithMixin>(GetAllocationHandle());
last_object = &(*last_object)->child_;
}
// Use SingleStep to re-post concurrent jobs.
SingleStep(Config::StackState::kNoHeapPointers);
}
FinishGC();
}
#endif // defined(THREAD_SANITIZER)
} // namespace internal
} // namespace cppgc