Commit e404af78 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Support simple shared GCs without any clients

Allow GC of the shared heap without any attached clients. This
CL also disables incremental marking for shared heaps for now.

Bug: v8:11708
Change-Id: I1eb47a42fe3ced0f23f679ecaae0c32e09eab461
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2886878
Reviewed-by: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74511}
parent 0acdf365
@@ -1776,6 +1776,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   void AttachToSharedIsolate(Isolate* shared);
   void DetachFromSharedIsolate();
 
+  bool HasClientIsolates() const { return client_isolate_head_; }
+
  private:
   explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
                    bool is_shared);
......
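The new HasClientIsolates() predicate reports whether any client isolate is currently linked into the shared isolate's client list; it simply tests the client_isolate_head_ pointer. A rough sketch of how it pairs with the attach/detach calls declared above (illustration only, not code from this CL; `shared` and `client` are assumed to be a shared isolate and a regular isolate created by the embedder):

  // Sketch only: `shared` is a shared isolate, `client` a regular isolate.
  DCHECK(!shared->HasClientIsolates());   // fresh shared isolate, empty list
  client->AttachToSharedIsolate(shared);  // links `client` into the shared list
  DCHECK(shared->HasClientIsolates());    // client_isolate_head_ is now non-null
  client->DetachFromSharedIsolate();      // unlinks `client` again
  DCHECK(!shared->HasClientIsolates());   // shared GC without clients is allowed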
@@ -1657,9 +1657,6 @@ Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
 bool Heap::CollectGarbage(AllocationSpace space,
                           GarbageCollectionReason gc_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
-  // So far we can't collect the shared heap.
-  CHECK(!IsShared());
-
   if (V8_UNLIKELY(!deserialization_complete_)) {
     // During isolate initialization heap always grows. GC is only requested
     // if a new page allocation fails. In such a case we should crash with
@@ -2159,6 +2156,9 @@ size_t Heap::PerformGarbageCollection(
   SafepointScope safepoint_scope(this);
 
+  // Shared isolates cannot have any clients when running GC at the moment.
+  DCHECK_IMPLIES(IsShared(), !isolate()->HasClientIsolates());
+
   collection_barrier_->StopTimeToCollectionTimer();
 
 #ifdef VERIFY_HEAP
......
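The hard CHECK(!IsShared()) that previously forbade any GC of a shared heap is removed; what remains is a debug-only invariant in PerformGarbageCollection that a shared heap is only collected while no client isolate is attached (DCHECK_IMPLIES(a, b) is V8's debug-mode check for !a || b). A minimal sketch of what this now allows, written as a hypothetical test helper (the helper name is invented; GarbageCollectionReason::kTesting and the defaulted gc_callback_flags argument are assumptions, not shown in this CL):

  // Hypothetical helper, not part of this CL: trigger a full old-space GC of a
  // shared isolate's heap, which the removed CHECK used to forbid outright.
  void CollectSharedHeapForTesting(Isolate* shared_isolate) {
    Heap* heap = shared_isolate->heap();
    DCHECK(heap->IsShared());
    DCHECK(!shared_isolate->HasClientIsolates());  // the new invariant
    heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
  }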
@@ -139,12 +139,14 @@ bool IncrementalMarking::WasActivated() { return was_activated_; }
 bool IncrementalMarking::CanBeActivated() {
-  // Only start incremental marking in a safe state: 1) when incremental
-  // marking is turned on, 2) when we are currently not in a GC, and
-  // 3) when we are currently not serializing or deserializing the heap.
+  // Only start incremental marking in a safe state:
+  // 1) when incremental marking is turned on
+  // 2) when we are currently not in a GC, and
+  // 3) when we are currently not serializing or deserializing the heap, and
+  // 4) not a shared heap.
   return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
          heap_->deserialization_complete() &&
-         !heap_->isolate()->serializer_enabled();
+         !heap_->isolate()->serializer_enabled() && !heap_->IsShared();
 }
 
 bool IncrementalMarking::IsBelowActivationThresholds() const {
@@ -154,6 +156,7 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
 void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
   DCHECK(!collector_->sweeping_in_progress());
+  DCHECK(!heap_->IsShared());
 
   if (FLAG_trace_incremental_marking) {
     const size_t old_generation_size_mb =
......
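Both hunks express the same policy: shared heaps only run atomic, non-incremental full GCs for now. CanBeActivated() keeps callers from ever starting incremental marking on a shared heap, and the new DCHECK in Start() backs that up in debug builds. A rough sketch of how a call site sees this (simplified; the pointer name and the kTesting reason are just for illustration):

  // Sketch of a caller after this CL: the predicate is now false for shared
  // heaps, so Start() is never reached for them and its DCHECK never fires.
  if (incremental_marking->CanBeActivated()) {
    incremental_marking->Start(GarbageCollectionReason::kTesting);
  } else {
    // Incremental marking is simply not started; for a shared heap (or a heap
    // that is still deserializing, serializing, or already in a GC) any
    // collection stays atomic.
  }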
@@ -244,7 +244,9 @@ class FullMarkingVerifier : public MarkingVerifier {
  private:
   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
-    if (BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap()) return;
+    if (!heap_->IsShared() &&
+        BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
+      return;
     CHECK(marking_state_->IsBlackOrGrey(heap_object));
   }
@@ -422,6 +424,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
 #ifdef DEBUG
       state_(IDLE),
 #endif
+      is_shared_heap_(heap->IsShared()),
       was_marked_incrementally_(false),
       evacuation_(false),
       compacting_(false),
@@ -971,7 +974,7 @@ void MarkCompactCollector::SweepArrayBufferExtensions() {
 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
  public:
   explicit RootMarkingVisitor(MarkCompactCollector* collector)
-      : collector_(collector) {}
+      : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
 
   void VisitRootPointer(Root root, const char* description,
                         FullObjectSlot p) final {
...@@ -993,11 +996,12 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor { ...@@ -993,11 +996,12 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
HeapObject heap_object = HeapObject::cast(object); HeapObject heap_object = HeapObject::cast(object);
BasicMemoryChunk* target_page = BasicMemoryChunk* target_page =
BasicMemoryChunk::FromHeapObject(heap_object); BasicMemoryChunk::FromHeapObject(heap_object);
if (target_page->InSharedHeap()) return; if (!is_shared_heap_ && target_page->InSharedHeap()) return;
collector_->MarkRootObject(root, heap_object); collector_->MarkRootObject(root, heap_object);
} }
MarkCompactCollector* const collector_; MarkCompactCollector* const collector_;
const bool is_shared_heap_;
}; };
// This visitor is used to visit the body of special objects held alive by // This visitor is used to visit the body of special objects held alive by
......
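The recurring pattern in these hunks is the actual behavioral change: a client heap's collector (and verifier) must not mark objects that live in the shared heap, but the shared heap's own collector now must, since it is the one collecting them. Written out as a hypothetical standalone predicate (V8 inlines this condition at each call site shown above rather than using a helper like this):

  // Hypothetical helper summarizing the marking rule after this CL.
  bool ShouldMarkObject(bool collector_is_shared_heap, BasicMemoryChunk* chunk) {
    if (chunk->InSharedHeap()) {
      // Objects in the shared heap are only marked by the shared heap's own
      // collector; client collectors skip them and leave them to the shared GC.
      return collector_is_shared_heap;
    }
    return true;  // objects in the collector's own heap are always candidates
  }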
@@ -506,6 +506,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   void RecordLiveSlotsOnPage(Page* page);
 
   bool is_compacting() const { return compacting_; }
+  bool is_shared_heap() const { return is_shared_heap_; }
 
   // Ensures that sweeping is finished.
   //
@@ -743,6 +744,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   CollectorState state_;
 #endif
 
+  const bool is_shared_heap_;
+
   bool was_marked_incrementally_;
   bool evacuation_;
......
@@ -41,7 +41,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
     HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
   concrete_visitor()->SynchronizePageAccess(heap_object);
   BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(heap_object);
-  if (target_page->InSharedHeap()) return;
+  if (!is_shared_heap_ && target_page->InSharedHeap()) return;
   MarkObject(host, heap_object);
   concrete_visitor()->RecordSlot(host, slot, heap_object);
 }
......
@@ -114,7 +114,8 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
         mark_compact_epoch_(mark_compact_epoch),
         bytecode_flush_mode_(bytecode_flush_mode),
         is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
-        is_forced_gc_(is_forced_gc) {}
+        is_forced_gc_(is_forced_gc),
+        is_shared_heap_(heap->IsShared()) {}
 
   V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
   V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -201,6 +202,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
   const BytecodeFlushMode bytecode_flush_mode_;
   const bool is_embedder_tracing_enabled_;
   const bool is_forced_gc_;
+  const bool is_shared_heap_;
 };
 
 }  // namespace internal
......
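MarkingVisitorBase already receives the Heap* in its constructor, so the shared-heap property is captured once per marking cycle as a const bool member instead of being re-derived for every slot visited in ProcessStrongHeapObject. The same pattern in miniature, with invented stand-in types (nothing below is V8 code):

  // Minimal illustration of the "cache a per-cycle constant in the visitor"
  // pattern; Heap, BasicMemoryChunk and Visitor here are invented stand-ins.
  struct Visitor {
    explicit Visitor(const Heap* heap) : is_shared_heap_(heap->IsShared()) {}
    // Hot path: one bool test plus the page flag, no per-object heap query.
    bool ShouldSkip(const BasicMemoryChunk* page) const {
      return !is_shared_heap_ && page->InSharedHeap();
    }
    const bool is_shared_heap_;  // fixed for the lifetime of one marking cycle
  };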
@@ -75,5 +75,17 @@ UNINITIALIZED_TEST(ConcurrentAllocationInSharedOldSpace) {
   Isolate::Delete(shared_isolate);
 }
 
+UNINITIALIZED_TEST(SharedCollection) {
+  std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
+      v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = allocator.get();
+  Isolate* shared_isolate = Isolate::NewShared(create_params);
+
+  CcTest::CollectGarbage(OLD_SPACE, shared_isolate);
+
+  Isolate::Delete(shared_isolate);
+}
+
 }  // namespace internal
 }  // namespace v8