Commit 72d609e0 authored by Dominik Inführ, committed by Commit Bot

[heap] Allow background threads to start incremental marking

Background threads can now start incremental marking when necessary. In
contrast to the main thread, they always need to schedule a job and can't
start incremental marking right away. Background threads also use a
simpler heuristic for deciding whether to start incremental marking.

Bug: v8:10315
Change-Id: I2b94e8273c8be860157fe9670797048ed1c5c3da
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2184149
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67675}
parent 16ff5f83
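
The new background path appears in the heap.cc and spaces.cc hunks below. As a rough, simplified sketch of that flow (all types, fields, and the example numbers here are placeholders, not the real V8 interfaces): a background thread that hits the slow allocation path compares old-generation and global memory headroom against new-space capacity and, if the limit is reached, only schedules an incremental-marking job for the main thread instead of starting marking itself.

// Simplified sketch of the flow this CL adds; placeholder types and values.
#include <cstddef>
#include <iostream>

struct IncrementalMarkingJobSketch {
  bool task_scheduled = false;
  // In V8 this posts a task to the main-thread task runner.
  void ScheduleTask() { task_scheduled = true; }
};

struct HeapSketch {
  bool marking_stopped = true;
  bool marking_can_be_activated = true;
  size_t old_generation_available = 1 * 1024 * 1024;
  size_t global_memory_available = 1 * 1024 * 1024;
  size_t new_space_capacity = 2 * 1024 * 1024;
  IncrementalMarkingJobSketch job;

  // Background threads never start marking directly: they apply a simple
  // limit check and, if it is hit, schedule a job that the main thread runs.
  void StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
    if (!marking_stopped || !marking_can_be_activated) return;
    if (old_generation_available < new_space_capacity ||
        global_memory_available < new_space_capacity) {
      job.ScheduleTask();
    }
  }
};

int main() {
  HeapSketch heap;
  // Called from the background allocation slow path (compare
  // TryAllocationFromFreeListBackground in the diff below).
  heap.StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
  std::cout << "job scheduled: " << heap.job.task_scheduled << "\n";
}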
......@@ -75,7 +75,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
local_heap_->heap(), AllocationResult(object), result->second);
DCHECK(lab_.IsValid());
if (!lab_.TryMerge(&saved_lab)) {
saved_lab.CloseWithFiller();
saved_lab.CloseAndMakeIterable();
}
return true;
}
......
......@@ -31,7 +31,13 @@ Address ConcurrentAllocator::PerformCollectionAndAllocateAgain(
heap->FatalProcessOutOfMemory("ConcurrentAllocator: allocation failed");
}
void ConcurrentAllocator::FreeLinearAllocationArea() { lab_.CloseWithFiller(); }
void ConcurrentAllocator::FreeLinearAllocationArea() {
lab_.CloseAndMakeIterable();
}
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
lab_.MakeIterable();
}
} // namespace internal
} // namespace v8
......@@ -35,6 +35,7 @@ class ConcurrentAllocator {
AllocationOrigin origin);
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
private:
inline bool EnsureLab(AllocationOrigin origin);
......
......@@ -1138,15 +1138,6 @@ void Heap::GarbageCollectionEpilogueInSafepoint() {
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
}
void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
ZapFromSpace();
}
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
// Old-to-new slot sets must be empty after each collection.
......@@ -1164,6 +1155,15 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
}
void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
ZapFromSpace();
}
AllowHeapAllocation for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
......@@ -1668,6 +1668,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
SafepointScope safepoint(this);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
......@@ -1690,6 +1691,21 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
}
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
if (!incremental_marking()->IsStopped() ||
!incremental_marking()->CanBeActivated()) {
return;
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
const size_t global_memory_available = GlobalMemoryAvailable();
if (old_generation_space_available < new_space_->Capacity() ||
global_memory_available < new_space_->Capacity()) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
......@@ -3551,6 +3567,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
SafepointScope safepoint(this);
InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
InvokeIncrementalMarkingEpilogueCallbacks();
......@@ -4392,6 +4409,13 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
if (FLAG_local_heaps) {
// Ensure heap is iterable
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
});
}
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
......@@ -5503,12 +5527,8 @@ void Heap::StartTearDown() {
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
if (FLAG_local_heaps) {
SafepointScope scope(this);
Verify();
} else {
Verify();
}
SafepointScope scope(this);
Verify();
}
#endif
}
......
......@@ -983,6 +983,7 @@ class Heap {
void StartIncrementalMarkingIfAllocationLimitIsReached(
int gc_flags,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
void StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
// Synchronously finalizes incremental marking.
......
......@@ -4,6 +4,7 @@
#include "src/heap/incremental-marking-job.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
#include "src/execution/vm-state-inl.h"
......@@ -47,6 +48,8 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
base::MutexGuard guard(&mutex_);
if (!IsTaskPending(task_type) && !heap->IsTearingDown() &&
FLAG_incremental_marking_task) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
......@@ -112,8 +115,11 @@ void IncrementalMarkingJob::Task::RunInternal() {
}
// Clear this flag after StartIncrementalMarking call to avoid
// scheduling a new task when startining incremental marking.
job_->SetTaskPending(task_type_, false);
// scheduling a new task when starting incremental marking.
{
base::MutexGuard guard(&job_->mutex_);
job_->SetTaskPending(task_type_, false);
}
if (!incremental_marking->IsStopped()) {
StepResult step_result = Step(heap);
......
......@@ -28,15 +28,15 @@ class IncrementalMarkingJob final {
double CurrentTimeToTask(Heap* heap) const;
private:
class Task;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
bool IsTaskPending(TaskType task_type) const {
return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_;
}
private:
class Task;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) {
normal_task_pending_ = value;
......@@ -45,6 +45,7 @@ class IncrementalMarkingJob final {
}
}
base::Mutex mutex_;
double scheduled_time_ = 0.0;
bool normal_task_pending_ = false;
bool delayed_task_pending_ = false;
......
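
ScheduleTask() can now be reached from background threads, so the job's pending flags move under a mutex and the task clears its flag under that same mutex after marking has started. A minimal sketch of this protocol, using std::mutex and illustrative names rather than the real v8::base primitives:

#include <mutex>

class MarkingJobSketch {
 public:
  // May now be called from any thread, including background allocators.
  void ScheduleTask() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (task_pending_) return;  // a start-marking task is already queued
    task_pending_ = true;
    // ...post the task to the foreground task runner...
  }

  // Runs on the main thread as the posted task.
  void RunTask() {
    // ...StartIncrementalMarking() happens first...
    {
      // Clear the flag only after marking has started, so a concurrent
      // ScheduleTask() does not queue a redundant task in between.
      std::lock_guard<std::mutex> guard(mutex_);
      task_pending_ = false;
    }
  }

 private:
  std::mutex mutex_;
  bool task_pending_ = false;
};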
......@@ -413,10 +413,8 @@ void IncrementalMarking::MarkRoots() {
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
if (FLAG_local_heaps) heap_->safepoint()->Start();
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG_IGNORE_STACK);
if (FLAG_local_heaps) heap_->safepoint()->End();
}
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
......@@ -959,6 +957,11 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
#ifdef DEBUG
// Enforce safepoint here such that background threads cannot allocate between
// completing sweeping and VerifyCountersAfterSweeping().
SafepointScope scope(heap());
#endif
if (collector_->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
!collector_->sweeper()->AreSweeperTasksRunning())) {
......@@ -967,6 +970,8 @@ void IncrementalMarking::FinalizeSweeping() {
if (!collector_->sweeping_in_progress()) {
#ifdef DEBUG
heap_->VerifyCountersAfterSweeping();
#else
SafepointScope scope(heap());
#endif
StartMarking();
}
......
......@@ -28,7 +28,7 @@ enum class StepResult {
class V8_EXPORT_PRIVATE IncrementalMarking final {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
enum State : uint8_t { STOPPED, SWEEPING, MARKING, COMPLETE };
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
......@@ -317,7 +317,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
size_t bytes_marked_concurrently_ = 0;
// Must use SetState() above to update state_
State state_;
// Atomic since main thread can complete marking (= changing state), while a
// background thread's slow allocation path will check whether incremental
// marking is currently running.
std::atomic<State> state_;
bool is_compacting_ = false;
bool was_activated_ = false;
......
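
The state_ field becomes std::atomic because the main thread transitions the marker while background threads' slow allocation paths read it concurrently. A minimal sketch of that read/write pair (simplified names, not the real IncrementalMarking accessors):

#include <atomic>
#include <cstdint>

struct MarkerSketch {
  enum State : uint8_t { STOPPED, SWEEPING, MARKING, COMPLETE };
  std::atomic<State> state{STOPPED};

  // Main thread: completes marking, i.e. changes the state.
  void Complete() { state.store(COMPLETE, std::memory_order_relaxed); }

  // Background thread: the allocation slow path only needs a consistent
  // snapshot of whether marking is currently running.
  bool IsMarking() const {
    return state.load(std::memory_order_relaxed) >= MARKING;
  }
};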
......@@ -93,7 +93,7 @@ bool EvacuationAllocator::NewLocalAllocationBuffer() {
new_space_lab_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
DCHECK(new_space_lab_.IsValid());
if (!new_space_lab_.TryMerge(&saved_lab)) {
saved_lab.CloseWithFiller();
saved_lab.CloseAndMakeIterable();
}
return true;
}
......
......@@ -33,7 +33,7 @@ class EvacuationAllocator {
heap_->code_space()->MergeLocalSpace(compaction_spaces_.Get(CODE_SPACE));
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseWithFiller();
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
const Address top = new_space_->top();
if (info.limit() != kNullAddress && info.limit() == top) {
DCHECK_NE(info.top(), kNullAddress);
......
......@@ -103,5 +103,9 @@ void LocalHeap::FreeLinearAllocationArea() {
old_space_allocator_.FreeLinearAllocationArea();
}
void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_.MakeLinearAllocationAreaIterable();
}
} // namespace internal
} // namespace v8
......@@ -69,6 +69,7 @@ class LocalHeap {
void EnterSafepoint();
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
Heap* heap_;
......
......@@ -90,10 +90,12 @@ void GlobalSafepoint::Barrier::Wait() {
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->StopThreads();
if (FLAG_local_heaps) safepoint_->StopThreads();
}
SafepointScope::~SafepointScope() { safepoint_->ResumeThreads(); }
SafepointScope::~SafepointScope() {
if (FLAG_local_heaps) safepoint_->ResumeThreads();
}
void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
......
......@@ -2042,7 +2042,7 @@ PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
// TODO(dinfuehr): Start incremental marking if allocation limit is reached
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
......@@ -2356,12 +2356,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
return true;
}
LinearAllocationArea LocalAllocationBuffer::CloseWithFiller() {
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
ClearRecordedSlots::kNo);
MakeIterable();
const LinearAllocationArea old_info = allocation_info_;
allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
return old_info;
......@@ -2369,6 +2366,15 @@ LinearAllocationArea LocalAllocationBuffer::CloseWithFiller() {
return LinearAllocationArea(kNullAddress, kNullAddress);
}
void LocalAllocationBuffer::MakeIterable() {
if (IsValid()) {
heap_->CreateFillerObjectAt(
allocation_info_.top(),
static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
ClearRecordedSlots::kNo);
}
}
LocalAllocationBuffer::LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
: heap_(heap),
......
......@@ -1715,7 +1715,7 @@ class LocalAllocationBuffer {
AllocationResult result,
intptr_t size);
~LocalAllocationBuffer() { CloseWithFiller(); }
~LocalAllocationBuffer() { CloseAndMakeIterable(); }
LocalAllocationBuffer(const LocalAllocationBuffer& other) = delete;
V8_EXPORT_PRIVATE LocalAllocationBuffer(LocalAllocationBuffer&& other)
......@@ -1737,7 +1737,8 @@ class LocalAllocationBuffer {
inline bool TryFreeLast(HeapObject object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
V8_EXPORT_PRIVATE LinearAllocationArea CloseWithFiller();
V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
void MakeIterable();
private:
V8_EXPORT_PRIVATE LocalAllocationBuffer(
......
......@@ -204,7 +204,9 @@ class Sweeper {
SweptList swept_list_[kNumberOfSweepingSpaces];
SweepingList sweeping_list_[kNumberOfSweepingSpaces];
bool incremental_sweeper_pending_;
bool sweeping_in_progress_;
// Main thread can finalize sweeping, while background threads allocation slow
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
// Counter is actively maintained by the concurrent tasks to avoid querying
// the semaphore for maintaining a task counter on the main thread.
std::atomic<intptr_t> num_sweeping_tasks_;
......
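
Similarly, sweeping_in_progress_ becomes std::atomic<bool> because the main thread finalizes sweeping while a background thread's allocation slow path reads the flag to decide whether contributing to sweeping can still yield free memory. A tiny illustrative sketch with placeholder names:

#include <atomic>

struct SweeperSketch {
  std::atomic<bool> sweeping_in_progress{false};

  // Main thread.
  void StartSweeping() { sweeping_in_progress.store(true); }
  void FinalizeSweeping() { sweeping_in_progress.store(false); }

  // Background allocation slow path: sweeping more pages can only produce
  // free memory while sweeping is still in progress.
  bool WorthHelpingSweep() const { return sweeping_in_progress.load(); }
};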
......@@ -49,6 +49,9 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
AllocationOrigin::kRuntime);
heap_->CreateFillerObjectAt(address, kLargeObjectSize,
ClearRecordedSlots::kNo);
if (i % 10 == 0) {
local_heap.Safepoint();
}
}
pending_->fetch_sub(1);
......@@ -59,7 +62,7 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
};
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
FLAG_max_old_space_size = 8;
FLAG_max_old_space_size = 32;
FLAG_concurrent_allocation = true;
v8::Isolate::CreateParams create_params;
......
......@@ -17,6 +17,7 @@ using SafepointTest = TestWithIsolate;
TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
Heap* heap = i_isolate()->heap();
FLAG_local_heaps = true;
bool run = false;
{
SafepointScope scope(heap);
......@@ -47,6 +48,7 @@ class ParkedThread final : public v8::base::Thread {
TEST_F(SafepointTest, StopParkedThreads) {
Heap* heap = i_isolate()->heap();
FLAG_local_heaps = true;
int safepoints = 0;
......@@ -105,6 +107,7 @@ class RunningThread final : public v8::base::Thread {
TEST_F(SafepointTest, StopRunningThreads) {
Heap* heap = i_isolate()->heap();
FLAG_local_heaps = true;
const int kThreads = 10;
const int kRuns = 5;
......