Commit 09f082f2 authored by Anton Bikineev, committed by Commit Bot

Reland "cppgc: Port concurrent sweeper"

This reverts commit a35d0e8c.

The original CL is likely not the culprit for the infra failures.

Bug: chromium:1056170
Change-Id: I8fa85db8a737fb01328021782f0c43626fa52b0d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2215826
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67977}
parent d0a1c5b3
@@ -10,13 +10,16 @@
namespace cppgc {
// TODO(v8:10346): Put PageAllocator in a non-V8 include header to avoid
// depending on namespace v8.
// TODO(v8:10346): Put PageAllocator and Platform in a non-V8 include header to
// avoid depending on namespace v8.
using PageAllocator = v8::PageAllocator;
using Platform = v8::Platform;
// Initializes the garbage collector with the provided platform. Must be called
// before creating a Heap.
V8_EXPORT void InitializePlatform(PageAllocator* page_allocator);
V8_EXPORT void InitializePlatform(Platform* platform);
V8_EXPORT Platform* GetPlatform();
// Must be called after destroying the last used heap.
V8_EXPORT void ShutdownPlatform();
......
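With this change the embedder hands cppgc a full v8::Platform rather than a bare PageAllocator. A minimal sketch of the new initialization sequence, using the TestPlatform that this CL adds for the unit tests (see tests.cc near the end of the diff; any other v8::Platform implementation works the same way):

  // Sketch, mirroring TestWithPlatform::SetUpTestSuite() below.
  auto platform = std::make_unique<cppgc::internal::testing::TestPlatform>();
  cppgc::InitializePlatform(platform.get());  // was: InitializePlatform(page_allocator)
  // ... create cppgc heaps; cppgc::GetPlatform() now returns this platform ...
  cppgc::ShutdownPlatform();  // must run after the last heap is destroyed
  platform.reset();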
@@ -73,6 +73,15 @@ const BasePage* BasePage::FromInnerAddress(const Heap* heap,
heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
}
// static
void BasePage::Destroy(BasePage* page) {
if (page->is_large()) {
LargePage::Destroy(LargePage::From(page));
} else {
NormalPage::Destroy(NormalPage::From(page));
}
}
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
......
@@ -28,6 +28,8 @@ class V8_EXPORT_PRIVATE BasePage {
static BasePage* FromInnerAddress(const Heap*, void*);
static const BasePage* FromInnerAddress(const Heap*, const void*);
static void Destroy(BasePage*);
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
@@ -141,15 +143,15 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
static size_t PayloadSize();
bool PayloadContains(ConstAddress address) const {
return (PayloadStart() <= address) && (address < PayloadEnd());
}
ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
const ObjectStartBitmap& object_start_bitmap() const {
return object_start_bitmap_;
}
bool PayloadContains(ConstAddress address) const {
return (PayloadStart() <= address) && (address < PayloadEnd());
}
private:
NormalPage(Heap* heap, BaseSpace* space);
~NormalPage();
......
@@ -7,6 +7,7 @@
#include <algorithm>
#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
@@ -17,11 +18,13 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
: heap_(heap), index_(index), type_(type) {}
void BaseSpace::AddPage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
pages_.push_back(page);
}
void BaseSpace::RemovePage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
auto it = std::find(pages_.cbegin(), pages_.cend(), page);
DCHECK_NE(pages_.cend(), it);
pages_.erase(it);
......
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
namespace cppgc {
@@ -53,6 +54,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
};
......
@@ -116,7 +116,7 @@ void Heap::CollectGarbage(GCConfig config) {
marker_.reset();
{
NoGCScope no_gc(this);
sweeper_.Start(Sweeper::Config::kAtomic);
sweeper_.Start(config.sweep_type);
}
}
......
@@ -68,10 +68,14 @@ class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
struct GCConfig {
using StackState = Heap::StackState;
using SweepType = Sweeper::Config;
static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
static GCConfig Default() {
return {StackState::kMayContainHeapPointers, SweepType::kAtomic};
}
StackState stack_state = StackState::kMayContainHeapPointers;
SweepType sweep_type = SweepType::kAtomic;
};
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
......
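The new sweep_type field lets each collection choose between the old atomic sweep and the new incremental/concurrent mode. A hedged sketch of the internal usage (heap is assumed to be an internal::Heap*):

  // Default keeps the previous behavior: sweeping finishes before
  // CollectGarbage() returns.
  Heap::GCConfig config = Heap::GCConfig::Default();  // SweepType::kAtomic
  heap->CollectGarbage(config);

  // Concurrent mode: CollectGarbage() only starts the sweeper; pages are
  // swept by a background job plus foreground idle tasks, and
  // Sweeper::Finish() can force completion.
  config.sweep_type = Sweeper::Config::kIncrementalAndConcurrent;
  heap->CollectGarbage(config);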
@@ -10,16 +10,18 @@
namespace cppgc {
namespace internal {
static PageAllocator* g_page_allocator;
static Platform* g_platform;
} // namespace internal
void InitializePlatform(PageAllocator* page_allocator) {
internal::g_page_allocator = page_allocator;
internal::GlobalGCInfoTable::Create(page_allocator);
void InitializePlatform(Platform* platform) {
internal::g_platform = platform;
internal::GlobalGCInfoTable::Create(internal::g_platform->GetPageAllocator());
}
void ShutdownPlatform() { internal::g_page_allocator = nullptr; }
Platform* GetPlatform() { return internal::g_platform; }
void ShutdownPlatform() { internal::g_platform = nullptr; }
namespace internal {
......
@@ -4,8 +4,13 @@
#include "src/heap/cppgc/sweeper.h"
#include <atomic>
#include <memory>
#include <vector>
#include "include/cppgc/platform.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -23,6 +28,8 @@ namespace internal {
namespace {
using v8::base::Optional;
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -54,15 +61,119 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
template <typename T>
class ThreadSafeStack {
public:
ThreadSafeStack() = default;
void Push(T t) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.push_back(std::move(t));
}
Optional<T> Pop() {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
if (vector_.empty()) return v8::base::nullopt;
T top = std::move(vector_.back());
vector_.pop_back();
// std::move is technically redundant here, but it is needed to work around
// a bug in gcc-7.
return std::move(top);
}
template <typename It>
void Insert(It begin, It end) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.insert(vector_.end(), begin, end);
}
bool IsEmpty() const {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
return vector_.empty();
}
private:
std::vector<T> vector_;
mutable v8::base::Mutex mutex_;
};
struct SpaceState {
BaseSpace::Pages unswept_pages;
struct SweptPageState {
BasePage* page = nullptr;
std::vector<HeapObjectHeader*> unfinalized_objects;
FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false;
};
ThreadSafeStack<BasePage*> unswept_pages;
ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};
using SpaceStates = std::vector<SpaceState>;
bool SweepNormalPage(NormalPage* page) {
// Builder that finalizes objects and adds freelist entries right away.
class InlinedFinalizationBuilder final {
public:
using ResultType = bool;
explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
SET_MEMORY_INACCESIBLE(header, size);
}
void AddFreeListEntry(Address start, size_t size) {
auto* space = NormalPageSpace::From(page_->space());
space->free_list().Add({start, size});
}
ResultType GetResult(bool is_empty) { return is_empty; }
private:
BasePage* page_;
};
// Builder that produces results for deferred processing.
class DeferredFinalizationBuilder final {
public:
using ResultType = SpaceState::SweptPageState;
explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
void AddFinalizer(HeapObjectHeader* header, size_t size) {
if (header->IsFinalizable()) {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
SET_MEMORY_INACCESIBLE(header, size);
}
}
void AddFreeListEntry(Address start, size_t size) {
if (found_finalizer_) {
result_.unfinalized_free_list.push_back({start, size});
} else {
result_.cached_free_list.Add({start, size});
}
found_finalizer_ = false;
}
ResultType&& GetResult(bool is_empty) {
result_.is_empty = is_empty;
return std::move(result_);
}
private:
ResultType result_;
bool found_finalizer_ = false;
};
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
FinalizationBuilder builder(page);
auto* space = NormalPageSpace::From(page->space());
ObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -79,16 +190,16 @@ bool SweepNormalPage(NormalPage* page) {
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
header->Finalize();
SET_MEMORY_INACCESIBLE(header, size);
builder.AddFinalizer(header, size);
begin += size;
continue;
}
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
space->AddToFreeList(start_of_gap,
static_cast<size_t>(header_address - start_of_gap));
builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(header_address - start_of_gap));
bitmap.SetBit(start_of_gap);
}
header->Unmark<kAtomicAccess>();
bitmap.SetBit(begin);
@@ -98,56 +209,150 @@ bool SweepNormalPage(NormalPage* page) {
if (start_of_gap != page->PayloadStart() &&
start_of_gap != page->PayloadEnd()) {
space->AddToFreeList(
builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
bitmap.SetBit(start_of_gap);
}
const bool is_empty = (start_of_gap == page->PayloadStart());
return is_empty;
return builder.GetResult(is_empty);
}
// This visitor:
// - resets linear allocation buffers and clears free lists for all spaces;
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
// SweepFinalizer is responsible for heap/space/page finalization. Finalization
// is defined as a step following concurrent sweeping which:
// - calls finalizers;
// - returns (unmaps) empty pages;
// - merges freelists into the space's freelist.
class SweepFinalizer final {
public:
explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
explicit SweepFinalizer(v8::Platform* platform) : platform_(platform) {}
bool VisitNormalPageSpace(NormalPageSpace* space) {
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
(*states_)[space->index()].unswept_pages = space->RemoveAllPages();
return true;
void FinalizeHeap(SpaceStates* space_states) {
for (SpaceState& space_state : *space_states) {
FinalizeSpace(&space_state);
}
}
bool VisitLargePageSpace(LargePageSpace* space) {
(*states_)[space->index()].unswept_pages = space->RemoveAllPages();
void FinalizeSpace(SpaceState* space_state) {
while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
FinalizePage(&*page_state);
}
}
bool FinalizeSpaceWithDeadline(SpaceState* space_state,
double deadline_in_seconds) {
DCHECK(platform_);
static constexpr size_t kDeadlineCheckInterval = 8;
size_t page_count = 1;
while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
FinalizePage(&*page_state);
if (page_count % kDeadlineCheckInterval == 0 &&
deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
return false;
}
page_count++;
}
return true;
}
void FinalizePage(SpaceState::SweptPageState* page_state) {
DCHECK(page_state);
DCHECK(page_state->page);
BasePage* page = page_state->page;
// Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) {
object->Finalize();
}
// Unmap page if empty.
if (page_state->is_empty) {
BasePage::Destroy(page);
return;
}
DCHECK(!page->is_large());
// Merge freelists without finalizers.
FreeList& space_freelist =
NormalPageSpace::From(page->space())->free_list();
space_freelist.Append(std::move(page_state->cached_free_list));
// Merge freelist with finalizers.
for (auto entry : page_state->unfinalized_free_list) {
space_freelist.Add(std::move(entry));
}
// Add the page to the space.
page->space()->AddPage(page);
}
private:
SpaceStates* states_;
v8::Platform* platform_;
};
class MutatorThreadSweepVisitor final
: private HeapVisitor<MutatorThreadSweepVisitor> {
friend class HeapVisitor<MutatorThreadSweepVisitor>;
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
friend class HeapVisitor<MutatorThreadSweeper>;
public:
explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
for (SpaceState& state : *space_states) {
for (BasePage* page : state.unswept_pages) {
Traverse(page);
explicit MutatorThreadSweeper(SpaceStates* states, v8::Platform* platform)
: states_(states), platform_(platform) {}
void Sweep() {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
Traverse(*page);
}
}
}
bool SweepWithDeadline(double deadline_in_seconds) {
DCHECK(platform_);
static constexpr double kSlackInSeconds = 0.001;
for (SpaceState& state : *states_) {
// FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check the
// deadline every kDeadlineCheckInterval (8) pages, so leave a small slack
// for safety.
const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
platform_->MonotonicallyIncreasingTime();
if (remaining_budget <= 0.) return false;
// First, prioritize finalization of pages that were swept concurrently.
SweepFinalizer finalizer(platform_);
if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
return false;
}
// Help out the concurrent sweeper.
if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
return false;
}
state.unswept_pages.clear();
}
return true;
}
private:
bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
static constexpr size_t kDeadlineCheckInterval = 8;
size_t page_count = 1;
while (auto page = state->unswept_pages.Pop()) {
Traverse(*page);
if (page_count % kDeadlineCheckInterval == 0 &&
deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
return false;
}
page_count++;
}
return true;
}
bool VisitNormalPage(NormalPage* page) {
const bool is_empty = SweepNormalPage(page);
const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
NormalPage::Destroy(page);
} else {
@@ -157,53 +362,253 @@ class MutatorThreadSweepVisitor final
}
bool VisitLargePage(LargePage* page) {
if (page->ObjectHeader()->IsMarked()) {
HeapObjectHeader* header = page->ObjectHeader();
if (header->IsMarked()) {
header->Unmark();
page->space()->AddPage(page);
} else {
page->ObjectHeader()->Finalize();
header->Finalize();
LargePage::Destroy(page);
}
return true;
}
SpaceStates* states_;
v8::Platform* platform_;
};
class ConcurrentSweepTask final : public v8::JobTask,
private HeapVisitor<ConcurrentSweepTask> {
friend class HeapVisitor<ConcurrentSweepTask>;
public:
explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
void Run(v8::JobDelegate* delegate) final {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
Traverse(*page);
if (delegate->ShouldYield()) return;
}
}
is_completed_.store(true, std::memory_order_relaxed);
}
size_t GetMaxConcurrency() const final {
return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
}
private:
bool VisitNormalPage(NormalPage* page) {
SpaceState::SweptPageState sweep_result =
SweepNormalPage<DeferredFinalizationBuilder>(page);
const size_t space_index = page->space()->index();
DCHECK_GT(states_->size(), space_index);
SpaceState& space_state = (*states_)[space_index];
space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
return true;
}
bool VisitLargePage(LargePage* page) {
HeapObjectHeader* header = page->ObjectHeader();
if (header->IsMarked()) {
header->Unmark();
page->space()->AddPage(page);
return true;
}
if (!header->IsFinalizable()) {
LargePage::Destroy(page);
return true;
}
const size_t space_index = page->space()->index();
DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index];
state.swept_unfinalized_pages.Push(
{page, {page->ObjectHeader()}, {}, {}, true});
return true;
}
SpaceStates* states_;
std::atomic_bool is_completed_{false};
};
// This visitor:
// - resets linear allocation buffers and clears free lists for all spaces;
// - moves all Heap pages to local Sweeper's state (SpaceStates).
class PrepareForSweepVisitor final
: public HeapVisitor<PrepareForSweepVisitor> {
public:
explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
bool VisitNormalPageSpace(NormalPageSpace* space) {
DCHECK(!space->linear_allocation_buffer().size());
space->free_list().Clear();
ExtractPages(space);
return true;
}
bool VisitLargePageSpace(LargePageSpace* space) {
ExtractPages(space);
return true;
}
private:
void ExtractPages(BaseSpace* space) {
BaseSpace::Pages space_pages = space->RemoveAllPages();
(*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
space_pages.end());
}
SpaceStates* states_;
};
} // namespace
class Sweeper::SweeperImpl final {
public:
explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
space_states_.resize(heap_->size());
explicit SweeperImpl(RawHeap* heap)
: heap_(heap),
space_states_(heap->size()),
platform_(cppgc::GetPlatform()),
foreground_task_runner_(platform_->GetForegroundTaskRunner(nullptr)) {
// TODO(chromium:1056170): support Isolate independent task runner.
}
~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
is_in_progress_ = true;
#if DEBUG
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
if (config == Config::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
// TODO(chromium:1056170): Schedule concurrent sweeping.
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
}
void Finish() {
if (!is_in_progress_) return;
MutatorThreadSweepVisitor s(&space_states_);
// First, call finalizers on the mutator thread.
SweepFinalizer finalizer(platform_);
finalizer.FinalizeHeap(&space_states_);
// Then, help out the concurrent thread.
MutatorThreadSweeper sweeper(&space_states_, platform_);
sweeper.Sweep();
// Synchronize with the concurrent sweeper and call remaining finalizers.
SynchronizeAndFinalizeConcurrentSweeping();
is_in_progress_ = false;
}
private:
SpaceStates space_states_;
class IncrementalSweepTask : public v8::IdleTask {
public:
struct Handle {
Handle() = default;
void Cancel() {
DCHECK(is_cancelled_);
*is_cancelled_ = true;
}
bool IsCanceled() const {
DCHECK(is_cancelled_);
return *is_cancelled_;
}
explicit operator bool() const { return is_cancelled_.get(); }
private:
struct NonEmptyTag {};
explicit Handle(NonEmptyTag)
: is_cancelled_(std::make_shared<bool>(false)) {}
std::shared_ptr<bool> is_cancelled_;
friend class IncrementalSweepTask;
};
explicit IncrementalSweepTask(SweeperImpl* sweeper)
: sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
auto task = std::make_unique<IncrementalSweepTask>(sweeper);
auto handle = task->GetHandle();
runner->PostIdleTask(std::move(task));
return handle;
}
private:
void Run(double deadline_in_seconds) override {
if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
MutatorThreadSweeper sweeper(&sweeper_->space_states_,
sweeper_->platform_);
const bool sweep_complete =
sweeper.SweepWithDeadline(deadline_in_seconds);
if (sweep_complete) {
sweeper_->SynchronizeAndFinalizeConcurrentSweeping();
} else {
sweeper_->ScheduleIncrementalSweeping();
}
}
Handle GetHandle() const { return handle_; }
SweeperImpl* sweeper_;
// TODO(chromium:1056170): Change to CancelableTask.
Handle handle_;
};
void ScheduleIncrementalSweeping() {
if (!platform_ || !foreground_task_runner_) return;
incremental_sweeper_handle_ =
IncrementalSweepTask::Post(this, foreground_task_runner_.get());
}
void ScheduleConcurrentSweeping() {
if (!platform_) return;
concurrent_sweeper_handle_ = platform_->PostJob(
v8::TaskPriority::kUserVisible,
std::make_unique<ConcurrentSweepTask>(&space_states_));
}
void CancelSweepers() {
if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
}
void SynchronizeAndFinalizeConcurrentSweeping() {
CancelSweepers();
SweepFinalizer finalizer(platform_);
finalizer.FinalizeHeap(&space_states_);
}
RawHeap* heap_;
SpaceStates space_states_;
v8::Platform* platform_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
......
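FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() above share one batching idiom: drain a ThreadSafeStack in batches of kDeadlineCheckInterval items and poll the platform clock between batches, so an idle task overshoots its deadline by at most one batch. The pattern in isolation (illustrative sketch only; ProcessPage stands in for Traverse()/FinalizePage()):

  bool ProcessWithDeadline(ThreadSafeStack<BasePage*>* pages,
                           v8::Platform* platform,
                           double deadline_in_seconds) {
    static constexpr size_t kDeadlineCheckInterval = 8;
    size_t count = 1;
    while (auto page = pages->Pop()) {
      ProcessPage(*page);  // stand-in for the real per-page work
      if (count % kDeadlineCheckInterval == 0 &&
          deadline_in_seconds <= platform->MonotonicallyIncreasingTime()) {
        return false;  // out of budget; the caller reschedules the idle task
      }
      ++count;
    }
    return true;  // drained all work within the deadline
  }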
@@ -44,6 +44,7 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
"heap/cppgc/concurrent-sweeper-unittest.cc",
"heap/cppgc/custom-spaces-unittest.cc",
"heap/cppgc/finalizer-trait-unittest.cc",
"heap/cppgc/free-list-unittest.cc",
@@ -63,6 +64,8 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/source-location-unittest.cc",
"heap/cppgc/stack-unittest.cc",
"heap/cppgc/sweeper-unittest.cc",
"heap/cppgc/test-platform.cc",
"heap/cppgc/test-platform.h",
"heap/cppgc/tests.cc",
"heap/cppgc/tests.h",
"heap/cppgc/visitor-unittest.cc",
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <algorithm>
#include <set>
#include <vector>
#include "include/cppgc/allocation.h"
#include "include/cppgc/platform.h"
#include "include/v8-platform.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/page-memory-inl.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
#include "test/unittests/heap/cppgc/test-platform.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
namespace internal {
namespace {
class ResetLocalAllocationBufferVisitor final
: public HeapVisitor<ResetLocalAllocationBufferVisitor> {
public:
bool VisitLargePageSpace(LargePageSpace*) { return true; }
bool VisitNormalPageSpace(NormalPageSpace* space) {
space->ResetLinearAllocationBuffer();
return true;
}
};
void ResetLocalAllocationBuffers(Heap* heap) {
ResetLocalAllocationBufferVisitor visitor;
visitor.Traverse(&heap->raw_heap());
}
size_t g_destructor_callcount;
template <size_t Size>
class Finalizable : public GarbageCollected<Finalizable<Size>> {
public:
Finalizable() : creation_thread_{v8::base::OS::GetCurrentThreadId()} {}
virtual ~Finalizable() {
++g_destructor_callcount;
EXPECT_EQ(creation_thread_, v8::base::OS::GetCurrentThreadId());
}
virtual void Trace(cppgc::Visitor*) const {}
private:
char array_[Size];
int creation_thread_;
};
using NormalFinalizable = Finalizable<32>;
using LargeFinalizable = Finalizable<kLargeObjectSizeThreshold * 2>;
template <size_t Size>
class NonFinalizable : public GarbageCollected<NonFinalizable<Size>> {
public:
virtual void Trace(cppgc::Visitor*) const {}
private:
char array_[Size];
};
using NormalNonFinalizable = NonFinalizable<32>;
using LargeNonFinalizable = NonFinalizable<kLargeObjectSizeThreshold * 2>;
class ConcurrentSweeperTest : public testing::TestWithHeap {
public:
ConcurrentSweeperTest() { g_destructor_callcount = 0; }
void StartSweeping() {
Heap* heap = Heap::From(GetHeap());
ResetLocalAllocationBuffers(heap);
Sweeper& sweeper = heap->sweeper();
sweeper.Start(Sweeper::Config::kIncrementalAndConcurrent);
}
void FinishSweeping() {
Heap* heap = Heap::From(GetHeap());
Sweeper& sweeper = heap->sweeper();
sweeper.Finish();
}
const RawHeap& GetRawHeap() const {
const Heap* heap = Heap::From(GetHeap());
return heap->raw_heap();
}
void CheckFreeListEntries(const std::vector<void*>& objects) {
const Heap* heap = Heap::From(GetHeap());
const PageBackend* backend = heap->page_backend();
for (auto* object : objects) {
// The corresponding page may already have been removed.
if (!backend->Lookup(static_cast<ConstAddress>(object))) continue;
const auto* header =
BasePage::FromPayload(object)->TryObjectHeaderFromInnerAddress(
object);
// TryObjectHeaderFromInnerAddress returns nullptr for freelist entries.
EXPECT_EQ(nullptr, header);
}
}
void CheckPageRemoved(const BasePage* page) {
const Heap* heap = Heap::From(GetHeap());
const PageBackend* backend = heap->page_backend();
EXPECT_EQ(nullptr, backend->Lookup(reinterpret_cast<ConstAddress>(page)));
}
bool FreeListContains(const BaseSpace* space,
const std::vector<void*>& objects) {
const Heap* heap = Heap::From(GetHeap());
const PageBackend* backend = heap->page_backend();
const auto& freelist = NormalPageSpace::From(space)->free_list();
for (void* object : objects) {
// The corresponding page may already have been removed.
if (!backend->Lookup(static_cast<ConstAddress>(object))) continue;
if (!freelist.Contains({object, 0})) return false;
}
return true;
}
};
} // namespace
TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) {
// Non-finalizable objects are swept right away.
using GCedType = NormalNonFinalizable;
auto* unmarked_object = MakeGarbageCollected<GCedType>(GetHeap());
auto* marked_object = MakeGarbageCollected<GCedType>(GetHeap());
HeapObjectHeader::FromPayload(marked_object).TryMarkAtomic();
auto* page = BasePage::FromPayload(unmarked_object);
auto* space = page->space();
// The test requires both objects to be allocated on the same page.
ASSERT_EQ(page, BasePage::FromPayload(marked_object));
StartSweeping();
// Wait for concurrent sweeping to finish.
GetPlatform().WaitAllBackgroundTasks();
// Check that the marked object was unmarked.
EXPECT_FALSE(HeapObjectHeader::FromPayload(marked_object).IsMarked());
// Check that free list entries are created right away for non-finalizable
// objects, but not immediately returned to the space's freelist.
CheckFreeListEntries({unmarked_object});
EXPECT_FALSE(FreeListContains(space, {unmarked_object}));
FinishSweeping();
// Check that the cached free-list entries are merged into the freelist of
// the corresponding space.
EXPECT_TRUE(FreeListContains(space, {unmarked_object}));
}
TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
// Non-finalizable objects are swept right away.
using GCedType = LargeNonFinalizable;
auto* unmarked_object = MakeGarbageCollected<GCedType>(GetHeap());
auto* marked_object = MakeGarbageCollected<GCedType>(GetHeap());
HeapObjectHeader::FromPayload(marked_object).TryMarkAtomic();
auto* unmarked_page = BasePage::FromPayload(unmarked_object);
auto* marked_page = BasePage::FromPayload(marked_object);
auto* space = unmarked_page->space();
ASSERT_EQ(space, marked_page->space());
StartSweeping();
// Wait for concurrent sweeping to finish.
GetPlatform().WaitAllBackgroundTasks();
// Check that the marked object was unmarked.
EXPECT_FALSE(HeapObjectHeader::FromPayload(marked_object).IsMarked());
// Check that the page with the unmarked non-finalizable object was unmapped
// right away by the concurrent sweeper.
CheckPageRemoved(unmarked_page);
// Check that marked pages are returned to the space right away.
EXPECT_NE(space->end(), std::find(space->begin(), space->end(), marked_page));
FinishSweeping();
}
TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfNormalPage) {
static constexpr size_t kNumberOfObjects = 10;
// Finalizable types are left intact by concurrent sweeper.
using GCedType = NormalFinalizable;
std::set<BasePage*> pages;
std::vector<void*> objects;
BaseSpace* space = nullptr;
for (size_t i = 0; i < kNumberOfObjects; ++i) {
auto* object = MakeGarbageCollected<GCedType>(GetHeap());
objects.push_back(object);
auto* page = BasePage::FromPayload(object);
pages.insert(page);
if (!space) space = page->space();
}
StartSweeping();
// Wait for concurrent sweeping to finish.
GetPlatform().WaitAllBackgroundTasks();
// Check that pages are not returned right away.
for (auto* page : pages) {
EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
}
// Check that finalizable objects are left intact in pages.
EXPECT_FALSE(FreeListContains(space, objects));
// No finalizers have been executed.
EXPECT_EQ(0u, g_destructor_callcount);
FinishSweeping();
// Check that finalizable objects are swept and turned into freelist entries.
CheckFreeListEntries(objects);
// Check that space's freelist contains these entries.
EXPECT_TRUE(FreeListContains(space, objects));
// Check that finalizers have been executed.
EXPECT_EQ(kNumberOfObjects, g_destructor_callcount);
}
TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfLargePage) {
using GCedType = LargeFinalizable;
auto* object = MakeGarbageCollected<GCedType>(GetHeap());
auto* page = BasePage::FromPayload(object);
auto* space = page->space();
StartSweeping();
// Wait for concurrent sweeping to finish.
GetPlatform().WaitAllBackgroundTasks();
// Check that the page is not returned to the space.
EXPECT_EQ(space->end(), std::find(space->begin(), space->end(), page));
// Check that no destructors have been executed yet.
EXPECT_EQ(0u, g_destructor_callcount);
FinishSweeping();
// Check that the destructor was executed.
EXPECT_EQ(1u, g_destructor_callcount);
// Check that page was unmapped.
CheckPageRemoved(page);
}
TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
testing::TestPlatform::DisableBackgroundTasksScope disable_concurrent_sweeper(
&GetPlatform());
auto task_runner = GetPlatform().GetForegroundTaskRunner(nullptr);
// Create two unmarked objects.
MakeGarbageCollected<NormalFinalizable>(GetHeap());
MakeGarbageCollected<LargeFinalizable>(GetHeap());
// Create two marked objects.
auto* marked_normal_object =
MakeGarbageCollected<NormalFinalizable>(GetHeap());
auto* marked_large_object = MakeGarbageCollected<LargeFinalizable>(GetHeap());
auto& marked_normal_header =
HeapObjectHeader::FromPayload(marked_normal_object);
auto& marked_large_header =
HeapObjectHeader::FromPayload(marked_large_object);
marked_normal_header.TryMarkAtomic();
marked_large_header.TryMarkAtomic();
StartSweeping();
EXPECT_EQ(0u, g_destructor_callcount);
EXPECT_TRUE(marked_normal_header.IsMarked());
EXPECT_TRUE(marked_large_header.IsMarked());
// Wait for incremental sweeper to finish.
GetPlatform().WaitAllForegroundTasks();
EXPECT_EQ(2u, g_destructor_callcount);
EXPECT_FALSE(marked_normal_header.IsMarked());
EXPECT_FALSE(marked_large_header.IsMarked());
FinishSweeping();
}
} // namespace internal
} // namespace cppgc
@@ -245,5 +245,25 @@ TEST_F(SweeperTest, SweepDoesNotTriggerRecursiveGC) {
EXPECT_EQ(saved_epoch + 1, internal_heap->epoch());
}
TEST_F(SweeperTest, UnmarkObjects) {
auto* normal_object = MakeGarbageCollected<GCed<32>>(GetHeap());
auto* large_object =
MakeGarbageCollected<GCed<kLargeObjectSizeThreshold * 2>>(GetHeap());
auto& normal_object_header = HeapObjectHeader::FromPayload(normal_object);
auto& large_object_header = HeapObjectHeader::FromPayload(large_object);
normal_object_header.TryMarkAtomic();
large_object_header.TryMarkAtomic();
EXPECT_TRUE(normal_object_header.IsMarked());
EXPECT_TRUE(large_object_header.IsMarked());
Sweep();
EXPECT_FALSE(normal_object_header.IsMarked());
EXPECT_FALSE(large_object_header.IsMarked());
}
} // namespace internal
} // namespace cppgc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "test/unittests/heap/cppgc/test-platform.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
namespace cppgc {
namespace internal {
namespace testing {
void TestTaskRunner::PostTask(std::unique_ptr<v8::Task> task) {
tasks_.push_back(std::move(task));
}
void TestTaskRunner::PostNonNestableTask(std::unique_ptr<v8::Task> task) {
PostTask(std::move(task));
}
void TestTaskRunner::PostDelayedTask(std::unique_ptr<v8::Task> task, double) {
PostTask(std::move(task));
}
void TestTaskRunner::PostNonNestableDelayedTask(std::unique_ptr<v8::Task> task,
double) {
PostTask(std::move(task));
}
void TestTaskRunner::PostIdleTask(std::unique_ptr<v8::IdleTask> task) {
idle_tasks_.push_back(std::move(task));
}
bool TestTaskRunner::RunSingleTask() {
if (!tasks_.size()) return false;
tasks_.back()->Run();
tasks_.pop_back();
return true;
}
bool TestTaskRunner::RunSingleIdleTask(double deadline_in_seconds) {
if (!idle_tasks_.size()) return false;
idle_tasks_.back()->Run(deadline_in_seconds);
idle_tasks_.pop_back();
return true;
}
void TestTaskRunner::RunUntilIdle() {
for (auto& task : tasks_) {
task->Run();
}
tasks_.clear();
for (auto& task : idle_tasks_) {
task->Run(std::numeric_limits<double>::infinity());
}
idle_tasks_.clear();
}
class TestPlatform::TestJobHandle : public v8::JobHandle {
public:
explicit TestJobHandle(const std::shared_ptr<JobThread>& thread)
: thread_(thread) {
const bool success = thread_->Start();
USE(success);
}
void NotifyConcurrencyIncrease() override {}
void Join() override { thread_->Join(); }
void Cancel() override { Join(); }
bool IsRunning() override { return true; }
private:
std::shared_ptr<JobThread> thread_;
};
TestPlatform::TestPlatform()
: foreground_task_runner_(std::make_unique<TestTaskRunner>()) {}
TestPlatform::~TestPlatform() V8_NOEXCEPT { WaitAllBackgroundTasks(); }
void TestPlatform::CallOnWorkerThread(std::unique_ptr<v8::Task> task) {
if (AreBackgroundTasksDisabled()) return;
auto thread = std::make_unique<WorkerThread>(std::move(task));
if (thread->Start()) {
threads_.push_back(std::move(thread));
}
}
void TestPlatform::CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
double) {
CallOnWorkerThread(std::move(task));
}
std::unique_ptr<v8::JobHandle> TestPlatform::PostJob(
v8::TaskPriority, std::unique_ptr<v8::JobTask> job_task) {
if (AreBackgroundTasksDisabled()) return {};
auto thread = std::make_shared<JobThread>(std::move(job_task));
job_threads_.push_back(thread);
return std::make_unique<TestJobHandle>(std::move(thread));
}
double TestPlatform::MonotonicallyIncreasingTime() {
return v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond);
}
double TestPlatform::CurrentClockTimeMillis() {
return v8::base::OS::TimeCurrentMillis();
}
void TestPlatform::WaitAllForegroundTasks() {
foreground_task_runner_->RunUntilIdle();
}
void TestPlatform::WaitAllBackgroundTasks() {
for (auto& thread : threads_) {
thread->Join();
}
threads_.clear();
for (auto& thread : job_threads_) {
thread->Join();
}
job_threads_.clear();
}
TestPlatform::DisableBackgroundTasksScope::DisableBackgroundTasksScope(
TestPlatform* platform)
: platform_(platform) {
++platform_->disabled_background_tasks_;
}
TestPlatform::DisableBackgroundTasksScope::~DisableBackgroundTasksScope()
V8_NOEXCEPT {
--platform_->disabled_background_tasks_;
}
} // namespace testing
} // namespace internal
} // namespace cppgc
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_UNITTESTS_HEAP_CPPGC_TEST_PLATFORM_H_
#define V8_UNITTESTS_HEAP_CPPGC_TEST_PLATFORM_H_
#include <memory>
#include <vector>
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
namespace cppgc {
namespace internal {
namespace testing {
class TestTaskRunner : public v8::TaskRunner {
public:
void PostTask(std::unique_ptr<v8::Task> task) override;
void PostNonNestableTask(std::unique_ptr<v8::Task> task) override;
void PostDelayedTask(std::unique_ptr<v8::Task> task, double) override;
void PostNonNestableDelayedTask(std::unique_ptr<v8::Task> task,
double) override;
void PostIdleTask(std::unique_ptr<v8::IdleTask> task) override;
bool IdleTasksEnabled() override { return true; }
bool RunSingleTask();
bool RunSingleIdleTask(double deadline_in_seconds);
void RunUntilIdle();
private:
std::vector<std::unique_ptr<v8::Task>> tasks_;
std::vector<std::unique_ptr<v8::IdleTask>> idle_tasks_;
};
class TestPlatform : public Platform {
public:
class DisableBackgroundTasksScope {
public:
explicit DisableBackgroundTasksScope(TestPlatform*);
~DisableBackgroundTasksScope() V8_NOEXCEPT;
private:
TestPlatform* platform_;
};
TestPlatform();
~TestPlatform() V8_NOEXCEPT override;
PageAllocator* GetPageAllocator() override { return &page_allocator_; }
int NumberOfWorkerThreads() override {
return static_cast<int>(threads_.size());
}
std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
v8::Isolate*) override {
return foreground_task_runner_;
}
void CallOnWorkerThread(std::unique_ptr<v8::Task> task) override;
void CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
double) override;
bool IdleTasksEnabled(v8::Isolate*) override { return true; }
std::unique_ptr<v8::JobHandle> PostJob(
v8::TaskPriority, std::unique_ptr<v8::JobTask> job_task) override;
double MonotonicallyIncreasingTime() override;
double CurrentClockTimeMillis() override;
v8::TracingController* GetTracingController() override {
return &tracing_controller_;
}
void WaitAllForegroundTasks();
void WaitAllBackgroundTasks();
private:
class TestJobHandle;
class WorkerThread : public v8::base::Thread {
public:
explicit WorkerThread(std::unique_ptr<v8::Task> task)
: Thread(Options("worker")), task_(std::move(task)) {}
void Run() override {
if (task_) std::move(task_)->Run();
}
private:
std::unique_ptr<v8::Task> task_;
};
class JobThread : public v8::base::Thread {
public:
explicit JobThread(std::unique_ptr<v8::JobTask> task)
: Thread(Options("job")), task_(std::move(task)) {}
void Run() override {
class JobDelegate : public v8::JobDelegate {
public:
bool ShouldYield() override { return false; }
void NotifyConcurrencyIncrease() override {}
} delegate;
if (task_) task_->Run(&delegate);
}
private:
std::unique_ptr<v8::JobTask> task_;
};
bool AreBackgroundTasksDisabled() const {
return disabled_background_tasks_ > 0;
}
v8::base::PageAllocator page_allocator_;
std::shared_ptr<TestTaskRunner> foreground_task_runner_;
std::vector<std::unique_ptr<WorkerThread>> threads_;
std::vector<std::shared_ptr<JobThread>> job_threads_;
v8::TracingController tracing_controller_;
size_t disabled_background_tasks_ = 0;
};
} // namespace testing
} // namespace internal
} // namespace cppgc
#endif // V8_UNITTESTS_HEAP_CPPGC_TEST_PLATFORM_H_
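For illustration, driving this test platform by hand (hypothetical standalone usage with an assumed job task; the TestWithPlatform fixture below instead installs it globally via InitializePlatform()):

  cppgc::internal::testing::TestPlatform platform;
  // some_job_task is an assumed std::unique_ptr<v8::JobTask>.
  auto handle = platform.PostJob(v8::TaskPriority::kUserVisible,
                                 std::move(some_job_task));
  platform.WaitAllBackgroundTasks();  // joins worker and job threads
  platform.WaitAllForegroundTasks();  // runs posted tasks, then idle tasks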
@@ -6,23 +6,25 @@
#include <memory>
#include "test/unittests/heap/cppgc/test-platform.h"
namespace cppgc {
namespace internal {
namespace testing {
// static
std::unique_ptr<cppgc::PageAllocator> TestWithPlatform::page_allocator_;
std::unique_ptr<TestPlatform> TestWithPlatform::platform_;
// static
void TestWithPlatform::SetUpTestSuite() {
page_allocator_ = std::make_unique<v8::base::PageAllocator>();
cppgc::InitializePlatform(page_allocator_.get());
platform_ = std::make_unique<TestPlatform>();
cppgc::InitializePlatform(platform_.get());
}
// static
void TestWithPlatform::TearDownTestSuite() {
cppgc::ShutdownPlatform();
page_allocator_.reset();
platform_.reset();
}
TestWithHeap::TestWithHeap() : heap_(Heap::Create()) {}
......
@@ -7,8 +7,8 @@
#include "include/cppgc/heap.h"
#include "include/cppgc/platform.h"
#include "src/base/page-allocator.h"
#include "src/heap/cppgc/heap.h"
#include "test/unittests/heap/cppgc/test-platform.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
@@ -20,8 +20,10 @@ class TestWithPlatform : public ::testing::Test {
static void SetUpTestSuite();
static void TearDownTestSuite();
TestPlatform& GetPlatform() const { return *platform_; }
private:
static std::unique_ptr<cppgc::PageAllocator> page_allocator_;
static std::unique_ptr<TestPlatform> platform_;
};
class TestWithHeap : public TestWithPlatform {
......