Commit 9663bb31 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Perform GC in a shared heap

This CL implements GC in a shared heap. A shared GC is started from
an attached client isolate that fails to allocate a shared object. In
order to perform a shared GC, all other running client isolates need
to be stopped and their roots scanned.
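
As a rough sketch (illustrative only, pieced together from the diff
below), the flow added by this CL is:

  // In a client heap, a failed shared allocation escalates to a
  // shared GC instead of a GC of the client's own spaces:
  if (IsSharedAllocationType(allocation)) {
    CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
  }

  // CollectSharedGarbage forwards to the shared isolate's heap, which
  // safepoints every client, runs a mark-compact, and resumes them:
  isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
      isolate(), gc_reason);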

Bug: v8:11708
Change-Id: I45ac50e6b4a1e9270f9e39b69f9b8ee5e6e14134
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2964816
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75606}
parent 7ac3b55a
......@@ -841,6 +841,11 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
UNREACHABLE();
}
inline constexpr bool IsSharedAllocationType(AllocationType kind) {
return kind == AllocationType::kSharedOld ||
kind == AllocationType::kSharedMap;
}
// TODO(ishell): review and rename kWordAligned to kTaggedAligned.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
......
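For illustration, the new predicate evaluates as follows (a sketch using
the standard AllocationType values; not part of the diff):

  // Only the two shared allocation types route into the shared-GC path.
  static_assert(IsSharedAllocationType(AllocationType::kSharedOld), "");
  static_assert(IsSharedAllocationType(AllocationType::kSharedMap), "");
  static_assert(!IsSharedAllocationType(AllocationType::kOld), "");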
......@@ -1802,6 +1802,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool HasClientIsolates() const { return client_isolate_head_; }
template <typename Callback>
void IterateClientIsolates(Callback callback) {
for (Isolate* current = client_isolate_head_; current;
current = current->next_client_isolate_) {
callback(current);
}
}
base::Mutex* client_isolate_mutex() { return &client_isolate_mutex_; }
private:
explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
bool is_shared);
......
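A minimal usage sketch for the new client-isolate iteration (the
shared_isolate pointer is a hypothetical caller-side variable):

  // Visit every client attached to the shared isolate; callers that may
  // race with attach/detach should hold client_isolate_mutex().
  base::MutexGuard guard(shared_isolate->client_isolate_mutex());
  shared_isolate->IterateClientIsolates([](Isolate* client) {
    DCHECK_NOT_NULL(client->shared_isolate());
  });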
......@@ -2172,9 +2172,6 @@ size_t Heap::PerformGarbageCollection(
SafepointScope safepoint_scope(this);
- // Shared isolates cannot have any clients when running GC at the moment.
- DCHECK_IMPLIES(IsShared(), !isolate()->HasClientIsolates());
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
......@@ -2262,6 +2259,50 @@ size_t Heap::PerformGarbageCollection(
return freed_global_handles;
}
void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
DCHECK(!IsShared());
DCHECK_NOT_NULL(isolate()->shared_isolate());
isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
isolate(), gc_reason);
}
void Heap::PerformSharedGarbageCollection(Isolate* initiator,
GarbageCollectionReason gc_reason) {
DCHECK(IsShared());
base::MutexGuard guard(isolate()->client_isolate_mutex());
const char* collector_reason = nullptr;
GarbageCollector collector = MARK_COMPACTOR;
tracer()->Start(collector, gc_reason, collector_reason);
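// Stop all attached clients at a safepoint (the initiator is already
// stopped) and flush their shared-space LABs so the shared heap is
// fully iterable during the collection.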
isolate()->IterateClientIsolates([initiator](Isolate* client) {
DCHECK_NOT_NULL(client->shared_isolate());
Heap* client_heap = client->heap();
GlobalSafepoint::StopMainThread stop_main_thread =
initiator == client ? GlobalSafepoint::StopMainThread::kNo
: GlobalSafepoint::StopMainThread::kYes;
client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
client_heap->shared_old_allocator_->FreeLinearAllocationArea();
client_heap->shared_map_allocator_->FreeLinearAllocationArea();
});
PerformGarbageCollection(MARK_COMPACTOR);
isolate()->IterateClientIsolates([initiator](Isolate* client) {
GlobalSafepoint::StopMainThread stop_main_thread =
initiator == client ? GlobalSafepoint::StopMainThread::kNo
: GlobalSafepoint::StopMainThread::kYes;
client->heap()->safepoint()->LeaveSafepointScope(stop_main_thread);
});
tracer()->Stop(collector);
}
void Heap::CompleteSweepingYoung(GarbageCollector collector) {
GCTracer::Scope::ScopeId scope_id;
......@@ -4322,10 +4363,7 @@ void Heap::Verify() {
SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
- MakeLocalHeapLabsIterable();
- // We have to wait here for the sweeper threads to have an iterable heap.
- mark_compact_collector()->EnsureSweepingCompleted();
MakeHeapIterable();
array_buffer_sweeper()->EnsureFinished();
......@@ -4792,6 +4830,15 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
}
}
void Heap::IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options) {
IterateRoots(v, options);
isolate()->IterateClientIsolates([v, options](Isolate* client) {
client->heap()->IterateRoots(v, options);
});
}
void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
isolate_->global_handles()->IterateWeakRoots(v);
}
......@@ -5340,8 +5387,12 @@ HeapObject Heap::AllocateRawWithLightRetrySlowPath(
}
// Two GCs before panicking. In new space this will almost always succeed.
for (int i = 0; i < 2; i++) {
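// A failed shared allocation can only be helped by collecting the shared
// heap; a GC of the client's own spaces would free no shared memory.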
if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
} else {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
}
alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
......@@ -5360,7 +5411,12 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kLastResort);
} else {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
}
{
AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment);
......
......@@ -1026,6 +1026,10 @@ class Heap {
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs a garbage collection operation for the shared heap.
V8_EXPORT_PRIVATE void CollectSharedGarbage(
GarbageCollectionReason gc_reason);
// Reports an external memory pressure event; either performs a major GC or
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
......@@ -1060,6 +1064,9 @@ class Heap {
// Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options);
void IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
void IterateSmiRoots(RootVisitor* v);
......@@ -1799,6 +1806,10 @@ class Heap {
GarbageCollector collector,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs garbage collection in the shared heap.
void PerformSharedGarbageCollection(Isolate* initiator,
GarbageCollectionReason gc_reason);
inline void UpdateOldSpaceLimits();
bool CreateInitialMaps();
......
......@@ -121,7 +121,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
};
void MarkingVerifier::VerifyRoots() {
- heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
heap_->IterateRootsIncludingClients(this,
base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
......@@ -245,9 +246,10 @@ class FullMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- if (!heap_->IsShared() &&
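// Verify only objects that belong to this heap: the shared-heap verifier
// skips client-local objects and the client verifiers skip shared objects.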
if (heap_->IsShared() !=
BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
return;
CHECK(marking_state_->IsBlackOrGrey(heap_object));
}
......@@ -305,7 +307,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
};
void EvacuationVerifier::VerifyRoots() {
- heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
heap_->IterateRootsIncludingClients(this,
base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
......@@ -356,6 +359,10 @@ class FullEvacuationVerifier : public EvacuationVerifier {
protected:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
if (heap_->IsShared() !=
BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
return;
CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
Heap::InToPage(heap_object));
CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
......@@ -1001,7 +1008,7 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
HeapObject heap_object = HeapObject::cast(object);
BasicMemoryChunk* target_page =
BasicMemoryChunk::FromHeapObject(heap_object);
- if (!is_shared_heap_ && target_page->InSharedHeap()) return;
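// Each collector marks only roots pointing into its own heap: the
// shared-heap collector ignores client-local objects and vice versa.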
if (is_shared_heap_ != target_page->InSharedHeap()) return;
collector_->MarkRootObject(root, heap_object);
}
......@@ -1629,10 +1636,16 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- heap()->IterateRoots(root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
heap()->IterateRootsIncludingClients(
root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
// Custom marking for top optimized frame.
- ProcessTopOptimizedFrame(custom_root_body_visitor);
ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
isolate()->IterateClientIsolates(
[this, custom_root_body_visitor](Isolate* client) {
ProcessTopOptimizedFrame(custom_root_body_visitor, client);
});
}
void MarkCompactCollector::VisitObject(HeapObject obj) {
......@@ -1921,13 +1934,14 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}
- void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
- for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
- !it.done(); it.Advance()) {
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
Isolate* isolate) {
for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
it.Advance()) {
if (it.frame()->is_unoptimized()) return;
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
- if (!code.CanDeoptAt(isolate(), it.frame()->pc())) {
if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
}
return;
......@@ -3984,8 +3998,9 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
// The external string table is updated at the end.
- heap_->IterateRoots(&updating_visitor, base::EnumSet<SkipRoot>{
- SkipRoot::kExternalStringTable});
heap_->IterateRootsIncludingClients(
&updating_visitor,
base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
}
{
......
......@@ -626,7 +626,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// If the call-site of the top optimized code was not prepared for
// deoptimization, then treat embedded pointers in the code as strong, as
// otherwise they can die and trigger deoptimization of the underlying code.
- void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
void ProcessTopOptimizedFrame(ObjectVisitor* visitor, Isolate* isolate);
// Drains the main thread marking work list. Will mark all pending objects
// if no concurrent threads are running.
......
......@@ -21,7 +21,7 @@ namespace internal {
GlobalSafepoint::GlobalSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
- void GlobalSafepoint::EnterSafepointScope() {
void GlobalSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer(
......@@ -37,10 +37,10 @@ void GlobalSafepoint::EnterSafepointScope() {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
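// With StopMainThread::kYes the main thread's LocalHeap is parked like
// any background thread; with kNo it is skipped, since it is the thread
// entering the safepoint.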
- if (local_heap->is_main_thread()) {
if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) {
continue;
}
- DCHECK(!local_heap->is_main_thread());
LocalHeap::ThreadState expected = local_heap->state_relaxed();
......@@ -64,7 +64,7 @@ void GlobalSafepoint::EnterSafepointScope() {
barrier_.WaitUntilRunningThreadsInSafepoint(running);
}
- void GlobalSafepoint::LeaveSafepointScope() {
void GlobalSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
......@@ -72,7 +72,8 @@ void GlobalSafepoint::LeaveSafepointScope() {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
- if (local_heap->is_main_thread()) {
if (local_heap->is_main_thread() &&
stop_main_thread == StopMainThread::kNo) {
continue;
}
......@@ -151,10 +152,12 @@ void GlobalSafepoint::Barrier::WaitInUnpark() {
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- safepoint_->EnterSafepointScope();
safepoint_->EnterSafepointScope(GlobalSafepoint::StopMainThread::kNo);
}
- SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
SafepointScope::~SafepointScope() {
safepoint_->LeaveSafepointScope(GlobalSafepoint::StopMainThread::kNo);
}
bool GlobalSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
......
......@@ -18,8 +18,8 @@ class Heap;
class LocalHeap;
class RootVisitor;
- // Used to bring all background threads with heap access to a safepoint such
- // that e.g. a garbage collection can be performed.
// Used to bring all threads with heap access to a safepoint such that e.g. a
// garbage collection can be performed.
class GlobalSafepoint {
public:
explicit GlobalSafepoint(Heap* heap);
......@@ -74,8 +74,10 @@ class GlobalSafepoint {
void NotifyPark();
};
- void EnterSafepointScope();
- void LeaveSafepointScope();
enum class StopMainThread { kYes, kNo };
void EnterSafepointScope(StopMainThread stop_main_thread);
void LeaveSafepointScope(StopMainThread stop_main_thread);
template <typename Callback>
void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
......@@ -116,9 +118,10 @@ class GlobalSafepoint {
int active_safepoint_scopes_;
- friend class SafepointScope;
friend class Heap;
friend class LocalHeap;
friend class PersistentHandles;
friend class SafepointScope;
};
class V8_NODISCARD SafepointScope {
......
......@@ -6,6 +6,8 @@
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "test/cctest/cctest.h"
......@@ -140,7 +142,7 @@ UNINITIALIZED_TEST(ConcurrentAllocationInSharedMapSpace) {
Isolate::Delete(shared_isolate);
}
- UNINITIALIZED_TEST(SharedCollection) {
UNINITIALIZED_TEST(SharedCollectionWithoutClients) {
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
......@@ -155,5 +157,43 @@ UNINITIALIZED_TEST(SharedCollection) {
Isolate::Delete(shared_isolate);
}
void AllocateInSharedSpace(Isolate* shared_isolate) {
SetupClientIsolateAndRunCallback(
shared_isolate,
[](v8::Isolate* client_isolate, Isolate* i_client_isolate) {
HandleScope scope(i_client_isolate);
std::vector<Handle<FixedArray>> arrays;
const int kKeptAliveArrays = 1000;
for (int i = 0; i < kNumIterations * 100; i++) {
HandleScope scope(i_client_isolate);
Handle<FixedArray> array = i_client_isolate->factory()->NewFixedArray(
100, AllocationType::kSharedOld);
if (i < kKeptAliveArrays) {
// Keep some of those arrays alive across GCs.
arrays.push_back(scope.CloseAndEscape(array));
}
}
for (Handle<FixedArray> array : arrays) {
CHECK_EQ(array->length(), 100);
}
});
}
UNINITIALIZED_TEST(SharedCollectionWithOneClient) {
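// A small (8 MB) old-space limit makes shared allocations fail early,
// forcing shared GCs while the client keeps some arrays alive.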
FLAG_max_old_space_size = 8;
std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
v8::ArrayBuffer::Allocator::NewDefaultAllocator());
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator.get();
Isolate* shared_isolate = Isolate::NewShared(create_params);
AllocateInSharedSpace(shared_isolate);
Isolate::Delete(shared_isolate);
}
} // namespace internal
} // namespace v8