Commit 5bbca548 authored by Dominik Inführ, committed by Commit Bot

[heap] Allow background threads to request GC

When a background thread fails to allocate, it requests a GC and
retries the allocation afterwards. Make the second allocation more likely
to succeed by allowing those allocations to expand the old space.

TLABs of LocalHeaps also need to be invalidated before the GC.

Bug: v8:10315
Change-Id: Idaea2c4ee25642d508c72ae274b06d60c6e225e0
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2154193
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67496}
parent d29b2f81
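
The mechanism the message describes — a background thread whose allocation fails parks itself, asks the main thread for a collection, and retries once the collection has run — can be sketched outside the V8 tree with standard library primitives. The standalone sketch below is not part of the diff and not V8 API: the CollectionBarrier name, the collection request, and the bounded retry loop mirror the patch, while std::mutex, std::condition_variable, and the placeholder "allocation" are illustrative stand-ins only.

// Simplified, self-contained sketch of the request-GC-and-retry protocol.
// Not V8 code: std::mutex/std::condition_variable stand in for base::Mutex /
// base::ConditionVariable, and the "allocation" is a placeholder that
// succeeds once one collection has run.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class CollectionBarrier {
 public:
  // Background thread: request a collection and block until it finished.
  void Wait() {
    std::unique_lock<std::mutex> guard(mutex_);
    requested_ = true;  // in the patch this also raises kCritical memory pressure
    cond_.wait(guard, [this] { return !requested_; });
  }
  // Main thread: a collection finished, release all waiting threads.
  void Increment() {
    std::lock_guard<std::mutex> guard(mutex_);
    requested_ = false;
    cond_.notify_all();
  }
  bool CollectionRequested() {
    std::lock_guard<std::mutex> guard(mutex_);
    return requested_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool requested_ = false;
};

int main() {
  CollectionBarrier barrier;
  std::atomic<bool> done{false};
  std::atomic<int> gcs{0};

  // Background allocator: bounded retry loop, analogous to
  // PerformCollectionAndAllocateAgain in the patch (three attempts, then fail).
  std::thread background([&] {
    bool allocated = false;
    for (int attempt = 0; attempt < 3 && !allocated; attempt++) {
      allocated = gcs.load() > 0;  // placeholder: succeeds after one GC
      if (!allocated) barrier.Wait();
    }
    std::printf("allocation %s\n", allocated ? "succeeded" : "failed");
    done = true;
  });

  // Main thread: service collection requests until the allocator is done.
  while (!done) {
    if (barrier.CollectionRequested()) {
      gcs++;                // "run a GC" here
      barrier.Increment();  // wake up parked background threads
    }
    std::this_thread::yield();
  }
  background.join();
  return 0;
}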
@@ -2392,6 +2392,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/combined-heap.cc",
     "src/heap/combined-heap.h",
     "src/heap/concurrent-allocator-inl.h",
+    "src/heap/concurrent-allocator.cc",
     "src/heap/concurrent-allocator.h",
     "src/heap/concurrent-marking.cc",
     "src/heap/concurrent-marking.h",
......
@@ -915,6 +915,7 @@ DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
             "concurrently sweep array buffers")
 DEFINE_BOOL(concurrent_allocation, false, "concurrently allocate in old space")
 DEFINE_BOOL(local_heaps, false, "allow heap access from background tasks")
+DEFINE_NEG_NEG_IMPLICATION(array_buffer_extension, local_heaps)
 DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
 DEFINE_INT(ephemeron_fixpoint_iterations, 10,
            "number of fixpoint iterations it takes to switch to linear "
......
@@ -5,6 +5,7 @@
 #ifndef V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
 #define V8_HEAP_CONCURRENT_ALLOCATOR_INL_H_
 
+#include "include/v8-internal.h"
 #include "src/common/globals.h"
 #include "src/heap/concurrent-allocator.h"
 
@@ -23,7 +24,7 @@ AllocationResult ConcurrentAllocator::Allocate(int object_size,
   CHECK(FLAG_concurrent_allocation);
   if (object_size > kMaxLabObjectSize) {
     auto result = space_->SlowGetLinearAllocationAreaBackground(
-        object_size, object_size, alignment, origin);
+        local_heap_, object_size, object_size, alignment, origin);
     if (result) {
       HeapObject object = HeapObject::FromAddress(result->first);
@@ -36,6 +37,14 @@ AllocationResult ConcurrentAllocator::Allocate(int object_size,
   return AllocateInLab(object_size, alignment, origin);
 }
 
+Address ConcurrentAllocator::AllocateOrFail(int object_size,
+                                            AllocationAlignment alignment,
+                                            AllocationOrigin origin) {
+  AllocationResult result = Allocate(object_size, alignment, origin);
+  if (!result.IsRetry()) return result.ToObjectChecked().address();
+  return PerformCollectionAndAllocateAgain(object_size, alignment, origin);
+}
+
 AllocationResult ConcurrentAllocator::AllocateInLab(
     int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
   AllocationResult allocation;
@@ -56,7 +65,7 @@ AllocationResult ConcurrentAllocator::AllocateInLab(
 bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
   auto result = space_->SlowGetLinearAllocationAreaBackground(
-      kLabSize, kMaxLabSize, kWordAligned, origin);
+      local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
   if (!result) return false;
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-allocator.h"

#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap.h"

namespace v8 {
namespace internal {

Address ConcurrentAllocator::PerformCollectionAndAllocateAgain(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  Heap* heap = local_heap_->heap();
  local_heap_->allocation_failed_ = true;

  for (int i = 0; i < 3; i++) {
    {
      ParkedScope scope(local_heap_);
      heap->RequestAndWaitForCollection();
    }

    AllocationResult result = Allocate(object_size, alignment, origin);
    if (!result.IsRetry()) {
      local_heap_->allocation_failed_ = false;
      return result.ToObjectChecked().address();
    }
  }

  heap->FatalProcessOutOfMemory("ConcurrentAllocator: allocation failed");
}

void ConcurrentAllocator::FreeLinearAllocationArea() { lab_.CloseWithFiller(); }

}  // namespace internal
}  // namespace v8
@@ -31,12 +31,20 @@ class ConcurrentAllocator {
                                    AllocationAlignment alignment,
                                    AllocationOrigin origin);
 
+  inline Address AllocateOrFail(int object_size, AllocationAlignment alignment,
+                                AllocationOrigin origin);
+
+  void FreeLinearAllocationArea();
+
  private:
   inline bool EnsureLab(AllocationOrigin origin);
   inline AllocationResult AllocateInLab(int object_size,
                                         AllocationAlignment alignment,
                                         AllocationOrigin origin);
 
+  V8_EXPORT_PRIVATE Address PerformCollectionAndAllocateAgain(
+      int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+
   LocalHeap* const local_heap_;
   PagedSpace* const space_;
   LocalAllocationBuffer lab_;
......
@@ -253,11 +253,10 @@ void GCTracer::Start(GarbageCollector collector,
   current_.reduce_memory = heap_->ShouldReduceMemory();
   current_.start_time = start_time;
-  current_.start_object_size = heap_->SizeOfObjects();
-  current_.start_memory_size = heap_->memory_allocator()->Size();
-  current_.start_holes_size = CountTotalHolesSize(heap_);
-  current_.young_object_size =
-      heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+  current_.start_object_size = 0;
+  current_.start_memory_size = 0;
+  current_.start_holes_size = 0;
+  current_.young_object_size = 0;
   current_.incremental_marking_bytes = 0;
   current_.incremental_marking_duration = 0;
@@ -281,6 +280,14 @@ void GCTracer::Start(GarbageCollector collector,
   }
 }
 
+void GCTracer::StartInSafepoint() {
+  current_.start_object_size = heap_->SizeOfObjects();
+  current_.start_memory_size = heap_->memory_allocator()->Size();
+  current_.start_holes_size = CountTotalHolesSize(heap_);
+  current_.young_object_size =
+      heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+}
+
 void GCTracer::ResetIncrementalMarkingCounters() {
   incremental_marking_bytes_ = 0;
   incremental_marking_duration_ = 0;
@@ -289,6 +296,13 @@ void GCTracer::ResetIncrementalMarkingCounters() {
   }
 }
 
+void GCTracer::StopInSafepoint() {
+  current_.end_object_size = heap_->SizeOfObjects();
+  current_.end_memory_size = heap_->memory_allocator()->Size();
+  current_.end_holes_size = CountTotalHolesSize(heap_);
+  current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
+}
+
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
@@ -309,10 +323,6 @@ void GCTracer::Stop(GarbageCollector collector) {
           current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
 
   current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
-  current_.end_object_size = heap_->SizeOfObjects();
-  current_.end_memory_size = heap_->memory_allocator()->Size();
-  current_.end_holes_size = CountTotalHolesSize(heap_);
-  current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
 
   AddAllocation(current_.end_time);
......
@@ -219,9 +219,11 @@ class V8_EXPORT_PRIVATE GCTracer {
   // Start collecting data.
   void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
              const char* collector_reason);
+  void StartInSafepoint();
 
   // Stop collecting data and print results.
   void Stop(GarbageCollector collector);
+  void StopInSafepoint();
 
   void NotifySweepingCompleted();
......
@@ -13,6 +13,7 @@
 #include "src/base/bits.h"
 #include "src/base/flags.h"
 #include "src/base/once.h"
+#include "src/base/platform/mutex.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/builtins/accessors.h"
 #include "src/codegen/assembler-inl.h"
@@ -204,7 +205,8 @@ Heap::Heap()
       memory_pressure_level_(MemoryPressureLevel::kNone),
       global_pretenuring_feedback_(kInitialFeedbackCapacity),
       safepoint_(new GlobalSafepoint(this)),
-      external_string_table_(this) {
+      external_string_table_(this),
+      collection_barrier_(this) {
   // Ensure old_generation_size_ is a multiple of kPageSize.
   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -1109,6 +1111,33 @@ void Heap::DeoptMarkedAllocationSites() {
   Deoptimizer::DeoptimizeMarkedCode(isolate_);
 }
 
+void Heap::GarbageCollectionEpilogueInSafepoint() {
+#define UPDATE_COUNTERS_FOR_SPACE(space)                \
+  isolate_->counters()->space##_bytes_available()->Set( \
+      static_cast<int>(space()->Available()));          \
+  isolate_->counters()->space##_bytes_committed()->Set( \
+      static_cast<int>(space()->CommittedMemory()));    \
+  isolate_->counters()->space##_bytes_used()->Set(      \
+      static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
+  if (space()->CommittedMemory() > 0) {                                \
+    isolate_->counters()->external_fragmentation_##space()->AddSample( \
+        static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) /    \
+                                   space()->CommittedMemory()));       \
+  }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+  UPDATE_COUNTERS_FOR_SPACE(space)                         \
+  UPDATE_FRAGMENTATION_FOR_SPACE(space)
+
+  UPDATE_COUNTERS_FOR_SPACE(new_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+}
+
 void Heap::GarbageCollectionEpilogue() {
   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
@@ -1161,33 +1190,6 @@ void Heap::GarbageCollectionEpilogue() {
         static_cast<int>(MaximumCommittedMemory() / KB));
   }
 
-#define UPDATE_COUNTERS_FOR_SPACE(space)                \
-  isolate_->counters()->space##_bytes_available()->Set( \
-      static_cast<int>(space()->Available()));          \
-  isolate_->counters()->space##_bytes_committed()->Set( \
-      static_cast<int>(space()->CommittedMemory()));    \
-  isolate_->counters()->space##_bytes_used()->Set(      \
-      static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
-  if (space()->CommittedMemory() > 0) {                                \
-    isolate_->counters()->external_fragmentation_##space()->AddSample( \
-        static_cast<int>(100 -                                         \
-                         (space()->SizeOfObjects() * 100.0) /          \
-                             space()->CommittedMemory()));             \
-  }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
-  UPDATE_COUNTERS_FOR_SPACE(space)                         \
-  UPDATE_FRAGMENTATION_FOR_SPACE(space)
-
-  UPDATE_COUNTERS_FOR_SPACE(new_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
-#undef UPDATE_COUNTERS_FOR_SPACE
-#undef UPDATE_FRAGMENTATION_FOR_SPACE
-#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
 
 #ifdef DEBUG
   ReportStatisticsAfterGC();
 #endif  // DEBUG
@@ -1628,6 +1630,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }
 
+  collection_barrier_.Increment();
+
   // Start incremental marking for the next cycle. We do this only for scavenger
   // to avoid a loop where mark-compact causes another mark-compact.
   if (IsYoungGenerationCollector(collector)) {
@@ -1942,6 +1946,26 @@ void Heap::EnsureFromSpaceIsCommitted() {
     FatalProcessOutOfMemory("Committing semi space failed.");
 }
 
+void Heap::CollectionBarrier::Increment() {
+  base::MutexGuard guard(&mutex_);
+  requested_ = false;
+  cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::Wait() {
+  base::MutexGuard guard(&mutex_);
+
+  if (!requested_) {
+    heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
+    requested_ = true;
+  }
+
+  while (requested_) {
+    cond_.Wait(&mutex_);
+  }
+}
+
+void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
+
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
@@ -2005,7 +2029,16 @@ bool Heap::PerformGarbageCollection(
     }
   }
 
-  if (FLAG_local_heaps) safepoint()->Start();
+  if (FLAG_local_heaps) {
+    safepoint()->Start();
+
+    // Fill and reset all LABs
+    safepoint()->IterateLocalHeaps(
+        [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+  }
+
+  tracer()->StartInSafepoint();
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
@@ -2094,6 +2127,10 @@ bool Heap::PerformGarbageCollection(
 
   RecomputeLimits(collector);
 
+  GarbageCollectionEpilogueInSafepoint();
+
+  tracer()->StopInSafepoint();
+
   if (FLAG_local_heaps) safepoint()->End();
 
   {
@@ -3845,12 +3882,17 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
 }
 
 void Heap::EagerlyFreeExternalMemory() {
-  for (Page* page : *old_space()) {
-    if (!page->SweepingDone()) {
-      base::MutexGuard guard(page->mutex());
-      if (!page->SweepingDone()) {
-        ArrayBufferTracker::FreeDead(
-            page, mark_compact_collector()->non_atomic_marking_state());
+  if (FLAG_array_buffer_extension) {
+    array_buffer_sweeper()->EnsureFinished();
+  } else {
+    CHECK(!FLAG_local_heaps);
+    for (Page* page : *old_space()) {
+      if (!page->SweepingDone()) {
+        base::MutexGuard guard(page->mutex());
+        if (!page->SweepingDone()) {
+          ArrayBufferTracker::FreeDead(
+              page, mark_compact_collector()->non_atomic_marking_state());
+        }
       }
     }
   }
@@ -4899,10 +4941,13 @@ bool Heap::ShouldOptimizeForLoadTime() {
 // major GC. It happens when the old generation allocation limit is reached and
 // - either we need to optimize for memory usage,
 // - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
   // We reached the old generation allocation limit.
 
+  // Ensure that retry of allocation on background thread succeeds
+  if (IsRetryOfFailedAllocation(local_heap)) return true;
+
   if (ShouldOptimizeForMemoryUsage()) return false;
 
   if (ShouldOptimizeForLoadTime()) return true;
@@ -4919,6 +4964,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
   return true;
 }
 
+bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
+  if (!local_heap) return false;
+  return local_heap->allocation_failed_;
+}
+
 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
   if (ShouldReduceMemory() || FLAG_stress_compaction) {
     return Heap::HeapGrowingMode::kMinimal;
......
@@ -17,6 +17,7 @@
 #include "include/v8-internal.h"
 #include "include/v8.h"
 #include "src/base/atomic-utils.h"
+#include "src/base/platform/condition-variable.h"
 #include "src/builtins/accessors.h"
 #include "src/common/assert-scope.h"
 #include "src/common/globals.h"
@@ -594,6 +595,8 @@ class Heap {
   // Returns false if not able to reserve.
   bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
 
+  void RequestAndWaitForCollection();
+
   //
   // Support for the API.
   //
@@ -1522,6 +1525,19 @@ class Heap {
     DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
   };
 
+  class CollectionBarrier {
+    Heap* heap_;
+    base::Mutex mutex_;
+    base::ConditionVariable cond_;
+    bool requested_;
+
+   public:
+    explicit CollectionBarrier(Heap* heap) : heap_(heap), requested_(false) {}
+
+    void Increment();
+    void Wait();
+  };
+
   struct StrongRootsList;
 
   struct StringTypeTable {
@@ -1735,6 +1751,7 @@ class Heap {
   // reporting/verification activities when compiled with DEBUG set.
   void GarbageCollectionPrologue();
   void GarbageCollectionEpilogue();
+  void GarbageCollectionEpilogueInSafepoint();
 
   // Performs a major collection in the whole heap.
   void MarkCompact();
@@ -1814,7 +1831,9 @@ class Heap {
   V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
 
-  bool ShouldExpandOldGenerationOnSlowAllocation();
+  bool ShouldExpandOldGenerationOnSlowAllocation(
+      LocalHeap* local_heap = nullptr);
+  bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
 
   HeapGrowingMode CurrentHeapGrowingMode();
@@ -2177,6 +2196,8 @@ class Heap {
   base::Mutex relocation_mutex_;
 
+  CollectionBarrier collection_barrier_;
+
   int gc_callbacks_depth_ = 0;
 
   bool deserialization_complete_ = false;
......
@@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   bool finalize_marking_completed_ = false;
   IncrementalMarkingJob incremental_marking_job_;
 
-  GCRequestType request_type_ = NONE;
+  std::atomic<GCRequestType> request_type_{NONE};
 
   Observer new_generation_observer_;
   Observer old_generation_observer_;
......
@@ -19,6 +19,7 @@ LocalHeap::LocalHeap(Heap* heap,
     : heap_(heap),
       state_(ThreadState::Running),
       safepoint_requested_(false),
+      allocation_failed_(false),
       prev_(nullptr),
       next_(nullptr),
       handles_(new LocalHandles),
@@ -31,6 +32,9 @@ LocalHeap::LocalHeap(Heap* heap,
 }
 
 LocalHeap::~LocalHeap() {
+  // Give up LAB before parking thread
+  old_space_allocator_.FreeLinearAllocationArea();
+
   // Park thread since removing the local heap could block.
   EnsureParkedBeforeDestruction();
@@ -95,5 +99,9 @@ void LocalHeap::ClearSafepointRequested() {
 
 void LocalHeap::EnterSafepoint() { heap_->safepoint()->EnterFromThread(this); }
 
+void LocalHeap::FreeLinearAllocationArea() {
+  old_space_allocator_.FreeLinearAllocationArea();
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -12,7 +12,6 @@
 #include "src/base/platform/mutex.h"
 #include "src/execution/isolate.h"
 #include "src/heap/concurrent-allocator.h"
-#include "src/heap/safepoint.h"
 
 namespace v8 {
 namespace internal {
@@ -69,6 +68,8 @@ class LocalHeap {
 
   void EnterSafepoint();
 
+  void FreeLinearAllocationArea();
+
   Heap* heap_;
 
   base::Mutex state_mutex_;
@@ -77,6 +78,8 @@ class LocalHeap {
 
   std::atomic<bool> safepoint_requested_;
 
+  bool allocation_failed_;
+
   LocalHeap* prev_;
   LocalHeap* next_;
@@ -88,6 +91,7 @@ class LocalHeap {
   friend class Heap;
   friend class GlobalSafepoint;
   friend class ParkedScope;
+  friend class ConcurrentAllocator;
 };
 
 class ParkedScope {
......
@@ -8,6 +8,7 @@
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
 #include "src/handles/persistent-handles.h"
+#include "src/heap/local-heap.h"
 #include "src/objects/visitors.h"
 
 namespace v8 {
@@ -32,6 +33,16 @@ class GlobalSafepoint {
   // Iterate handles in local heaps
   void Iterate(RootVisitor* visitor);
 
+  // Iterate local heaps
+  template <typename Callback>
+  void IterateLocalHeaps(Callback callback) {
+    DCHECK(IsActive());
+    for (LocalHeap* current = local_heaps_head_; current;
+         current = current->next_) {
+      callback(current);
+    }
+  }
+
   // Use these methods now instead of the more intrusive SafepointScope
   void Start();
   void End();
......
@@ -1876,7 +1876,6 @@ bool PagedSpace::Expand() {
   return true;
 }
 
-
 int PagedSpace::CountTotalPages() {
   int count = 0;
   for (Page* page : *this) {
@@ -2107,7 +2106,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
 }
 
 base::Optional<std::pair<Address, size_t>>
-PagedSpace::SlowGetLinearAllocationAreaBackground(size_t min_size_in_bytes,
+PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+                                                  size_t min_size_in_bytes,
                                                   size_t max_size_in_bytes,
                                                   AllocationAlignment alignment,
                                                   AllocationOrigin origin) {
@@ -2145,7 +2145,8 @@ PagedSpace::SlowGetLinearAllocationAreaBackground(size_t min_size_in_bytes,
         min_size_in_bytes, max_size_in_bytes, alignment, origin);
   }
 
-  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
+  if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
+      Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (min_size_in_bytes <= free_list_->Available()));
     return TryAllocationFromFreeListBackground(
......
@@ -527,7 +527,7 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
   AllocationSpace id_;
 
   // Keeps track of committed memory in a space.
-  size_t committed_;
+  std::atomic<size_t> committed_;
   size_t max_committed_;
 
   std::unique_ptr<FreeList> free_list_;
@@ -1677,7 +1677,7 @@ class AllocationStats {
   AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
     capacity_ = stats.capacity_.load();
     max_capacity_ = stats.max_capacity_;
-    size_ = stats.size_;
+    size_.store(stats.size_);
 #ifdef DEBUG
     allocated_on_page_ = stats.allocated_on_page_;
 #endif
@@ -1707,8 +1707,11 @@ class AllocationStats {
 #endif
 
   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
-    DCHECK_GE(size_ + bytes, size_);
-    size_ += bytes;
+#ifdef DEBUG
+    size_t size = size_;
+    DCHECK_GE(size + bytes, size);
+#endif
+    size_.fetch_add(bytes);
 #ifdef DEBUG
     allocated_on_page_[page] += bytes;
 #endif
@@ -1716,7 +1719,7 @@ class AllocationStats {
 
   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
     DCHECK_GE(size_, bytes);
-    size_ -= bytes;
+    size_.fetch_sub(bytes);
 #ifdef DEBUG
     DCHECK_GE(allocated_on_page_[page], bytes);
     allocated_on_page_[page] -= bytes;
@@ -1748,7 +1751,7 @@ class AllocationStats {
   size_t max_capacity_;
 
   // |size_|: The number of allocated bytes.
-  size_t size_;
+  std::atomic<size_t> size_;
 
 #ifdef DEBUG
   std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
@@ -2341,7 +2344,8 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Allocate the requested number of bytes in the space from a background
   // thread.
   V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
-  SlowGetLinearAllocationAreaBackground(size_t min_size_in_bytes,
+  SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+                                        size_t min_size_in_bytes,
                                         size_t max_size_in_bytes,
                                         AllocationAlignment alignment,
                                         AllocationOrigin origin);
......
@@ -23,52 +23,74 @@
 namespace v8 {
 namespace internal {
 
-const int kNumObjects = 100;
+const int kNumIterations = 2000;
 const int kObjectSize = 10 * kTaggedSize;
+const int kLargeObjectSize = 8 * KB;
 
 class ConcurrentAllocationThread final : public v8::base::Thread {
  public:
-  explicit ConcurrentAllocationThread(Heap* heap)
+  explicit ConcurrentAllocationThread(Heap* heap, std::atomic<int>* pending)
       : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")),
-        heap_(heap) {}
+        heap_(heap),
+        pending_(pending) {}
 
   void Run() override {
     LocalHeap local_heap(heap_);
     ConcurrentAllocator* allocator = local_heap.old_space_allocator();
 
-    for (int i = 0; i < kNumObjects; i++) {
-      AllocationResult result =
-          allocator->Allocate(kObjectSize, AllocationAlignment::kWordAligned,
-                              AllocationOrigin::kRuntime);
-
-      if (result.IsRetry()) {
-        break;
-      }
+    for (int i = 0; i < kNumIterations; i++) {
+      Address address = allocator->AllocateOrFail(
+          kObjectSize, AllocationAlignment::kWordAligned,
+          AllocationOrigin::kRuntime);
+      heap_->CreateFillerObjectAt(address, kObjectSize,
+                                  ClearRecordedSlots::kNo);
+      address = allocator->AllocateOrFail(kLargeObjectSize,
+                                          AllocationAlignment::kWordAligned,
+                                          AllocationOrigin::kRuntime);
+      heap_->CreateFillerObjectAt(address, kLargeObjectSize,
+                                  ClearRecordedSlots::kNo);
     }
+
+    pending_->fetch_sub(1);
   }
 
   Heap* heap_;
+  std::atomic<int>* pending_;
 };
 
-TEST(ConcurrentAllocationInOldSpace) {
-  CcTest::InitializeVM();
-  FLAG_local_heaps = true;
+UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
+  FLAG_max_old_space_size = 8;
   FLAG_concurrent_allocation = true;
-  Isolate* isolate = CcTest::i_isolate();
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  FLAG_local_heaps = true;
 
   std::vector<std::unique_ptr<ConcurrentAllocationThread>> threads;
 
   const int kThreads = 4;
 
+  std::atomic<int> pending(kThreads);
+
   for (int i = 0; i < kThreads; i++) {
-    auto thread = std::make_unique<ConcurrentAllocationThread>(isolate->heap());
+    auto thread = std::make_unique<ConcurrentAllocationThread>(
+        i_isolate->heap(), &pending);
     CHECK(thread->Start());
     threads.push_back(std::move(thread));
   }
 
+  while (pending > 0) {
+    v8::platform::PumpMessageLoop(i::V8::GetCurrentPlatform(), isolate);
+  }
+
   for (auto& thread : threads) {
     thread->Join();
   }
+
+  isolate->Dispose();
 }
 
 }  // namespace internal
......