Commit 52a7ae36 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Extract parts of ConcurrentMarkingVisitor into a base class

This is the first step towards unifying the concurrent and main-thread
marking visitors. The new MarkingVisitorBase will become the base class
for all marking visitors and will remove the existing code duplication.

This is a refactoring without behavior change.

A subsequent CL will change the main-thread marking visitor to derive
from the new base class.

Bug: chromium:1019218

Change-Id: I3d47030d396e0ba6706882fbd922bbcac46181b2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1886920
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64632}
parent 1dea7e42
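
For readers not familiar with the pattern, the new base classes rely on CRTP (the curiously recurring template pattern): the base calls into the concrete class through a static_cast, so the shared marking logic needs no virtual dispatch. Below is a minimal, self-contained sketch of that dispatch; all names are invented for the example and only the pattern itself mirrors this CL.

// Toy CRTP visitor: VisitorBase stands in for MarkingVisitorBase, and
// ShouldVisit stands in for the delegation to the concrete marking state.
#include <iostream>

template <typename ConcreteVisitor>
class VisitorBase {
 public:
  void Visit(int object) {
    // Shared logic defers the policy decision to the derived class,
    // analogous to ShouldVisit() calling marking_state()->GreyToBlack().
    if (static_cast<ConcreteVisitor*>(this)->ShouldVisit(object)) {
      std::cout << "visited " << object << "\n";
    }
  }
};

class ToyConcurrentVisitor : public VisitorBase<ToyConcurrentVisitor> {
 public:
  // Toy policy; the real visitor checks and flips mark bits instead.
  bool ShouldVisit(int object) { return object % 2 == 0; }
};

int main() {
  ToyConcurrentVisitor visitor;
  visitor.Visit(2);  // prints "visited 2"
  visitor.Visit(3);  // filtered out by the toy policy
  return 0;
}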
@@ -2248,6 +2248,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/mark-compact-inl.h",
     "src/heap/mark-compact.cc",
     "src/heap/mark-compact.h",
+    "src/heap/marking-visitor-inl.h",
+    "src/heap/marking-visitor.h",
     "src/heap/marking.cc",
     "src/heap/marking.h",
     "src/heap/memory-measurement.cc",
......
This diff is collapsed.
@@ -11,6 +11,7 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
+#include "src/heap/marking-visitor.h"
 #include "src/heap/slot-set.h"
 #include "src/heap/spaces.h"
 #include "src/heap/worklist.h"
@@ -65,12 +66,11 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   // Worklist::kMaxNumTasks being maxed at 8 (concurrent marking doesn't use
   // task 0, reserved for the main thread).
   static constexpr int kMaxTasks = 7;
-  using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
-  using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
 
-  ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
-                    MarkingWorklist* on_hold, WeakObjects* weak_objects,
-                    EmbedderTracingWorklist* embedder_objects);
+  ConcurrentMarking(Heap* heap, MarkingWorklist* marking_worklist,
+                    MarkingWorklist* on_hold,
+                    EmbedderTracingWorklist* embedder_worklist,
+                    WeakObjects* weak_objects);
 
   // Schedules asynchronous tasks to perform concurrent marking. Objects in the
   // heap should not be moved while these are active (can be stopped safely via
@@ -112,10 +112,10 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
   class Task;
   void Run(int task_id, TaskState* task_state);
   Heap* const heap_;
-  MarkingWorklist* const shared_;
+  MarkingWorklist* const marking_worklist_;
   MarkingWorklist* const on_hold_;
+  EmbedderTracingWorklist* const embedder_worklist_;
   WeakObjects* const weak_objects_;
-  EmbedderTracingWorklist* const embedder_objects_;
   TaskState task_state_[kMaxTasks + 1];
   std::atomic<size_t> total_marked_bytes_{0};
   std::atomic<bool> ephemeron_marked_{false};
......
@@ -5009,7 +5009,7 @@ void Heap::SetUp() {
         mark_compact_collector_->marking_worklist();
     concurrent_marking_.reset(new ConcurrentMarking(
         this, marking_worklist->shared(), marking_worklist->on_hold(),
-        mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
+        marking_worklist->embedder(), mark_compact_collector_->weak_objects()));
   } else {
     concurrent_marking_.reset(
        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
......
@@ -21,26 +21,6 @@
 namespace v8 {
 namespace internal {
 
-template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
-  MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
-  MarkBit markbit = MarkBitFrom(p, obj.address());
-  if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
-  static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
-  return true;
-}
-
-template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
-  return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
-}
-
-template <typename ConcreteState, AccessMode access_mode>
-bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
-    HeapObject obj) {
-  return WhiteToGrey(obj) && GreyToBlack(obj);
-}
-
 template <FixedArrayVisitationMode fixed_array_mode,
           TraceRetainingPathMode retaining_path_mode, typename MarkingState>
 MarkingVisitor<fixed_array_mode, retaining_path_mode,
......
@@ -9,13 +9,10 @@
 #include <vector>
 
 #include "src/heap/concurrent-marking.h"
+#include "src/heap/marking-visitor.h"
 #include "src/heap/marking.h"
-#include "src/heap/objects-visiting.h"
 #include "src/heap/spaces.h"
 #include "src/heap/sweeper.h"
-#include "src/heap/worklist.h"
-#include "src/objects/heap-object.h"  // For Worklist<HeapObject, ...>
-#include "src/objects/js-weak-refs.h"  // For Worklist<WeakCell, ...>
 
 namespace v8 {
 namespace internal {
@@ -29,53 +26,6 @@ class RecordMigratedSlotVisitor;
 class UpdatingItem;
 class YoungGenerationMarkingVisitor;
 
-template <typename ConcreteState, AccessMode access_mode>
-class MarkingStateBase {
- public:
-  V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
-    return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
-  }
-
-  // {addr} may be tagged or aligned.
-  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
-    return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
-        p->AddressToMarkbitIndex(addr));
-  }
-
-  Marking::ObjectColor Color(HeapObject obj) {
-    return Marking::Color(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool IsImpossible(HeapObject obj) {
-    return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool IsBlack(HeapObject obj) {
-    return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool IsWhite(HeapObject obj) {
-    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool IsGrey(HeapObject obj) {
-    return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
-    return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
-  }
-
-  V8_INLINE bool WhiteToGrey(HeapObject obj);
-  V8_INLINE bool WhiteToBlack(HeapObject obj);
-  V8_INLINE bool GreyToBlack(HeapObject obj);
-
-  void ClearLiveness(MemoryChunk* chunk) {
-    static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
-    static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
-  }
-};
-
 class MarkBitCellIterator {
  public:
   MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
@@ -405,57 +355,6 @@ class MajorNonAtomicMarkingState final
   }
 };
 
-struct Ephemeron {
-  HeapObject key;
-  HeapObject value;
-};
-
-using EphemeronWorklist = Worklist<Ephemeron, 64>;
-
-// Weak objects encountered during marking.
-struct WeakObjects {
-  Worklist<TransitionArray, 64> transition_arrays;
-
-  // Keep track of all EphemeronHashTables in the heap to process
-  // them in the atomic pause.
-  Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
-
-  // Keep track of all ephemerons for concurrent marking tasks. Only store
-  // ephemerons in these Worklists if both key and value are unreachable at the
-  // moment.
-  //
-  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
-  // worklists.
-  //
-  // current_ephemerons is used as draining worklist in the current fixpoint
-  // iteration.
-  EphemeronWorklist current_ephemerons;
-
-  // Stores ephemerons to visit in the next fixpoint iteration.
-  EphemeronWorklist next_ephemerons;
-
-  // When draining the marking worklist new discovered ephemerons are pushed
-  // into this worklist.
-  EphemeronWorklist discovered_ephemerons;
-
-  // TODO(marja): For old space, we only need the slot, not the host
-  // object. Optimize this by adding a different storage for old space.
-  Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
-  Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
-
-  Worklist<JSWeakRef, 64> js_weak_refs;
-  Worklist<WeakCell, 64> weak_cells;
-
-  Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
-  Worklist<JSFunction, 64> flushed_js_functions;
-};
-
-struct EphemeronMarking {
-  std::vector<HeapObject> newly_discovered;
-  bool newly_discovered_overflowed;
-  size_t newly_discovered_limit;
-};
-
 // Collector for young and old generation.
 class MarkCompactCollector final : public MarkCompactCollectorBase {
  public:
......
This diff is collapsed.
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MARKING_VISITOR_H_
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/heap/marking.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
#include "src/objects/heap-object.h" // For Worklist<HeapObject, ...>
#include "src/objects/js-weak-refs.h" // For Worklist<WeakCell, ...>
namespace v8 {
namespace internal {
using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
using EmbedderTracingWorklist = Worklist<HeapObject, 16 /* segment size */>;
struct Ephemeron {
HeapObject key;
HeapObject value;
};
using EphemeronWorklist = Worklist<Ephemeron, 64>;
// Weak objects encountered during marking.
struct WeakObjects {
Worklist<TransitionArray, 64> transition_arrays;
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
// moment.
//
// MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
// worklists.
//
// current_ephemerons is used as draining worklist in the current fixpoint
// iteration.
EphemeronWorklist current_ephemerons;
// Stores ephemerons to visit in the next fixpoint iteration.
EphemeronWorklist next_ephemerons;
// When draining the marking worklist, newly discovered ephemerons are pushed
// into this worklist.
EphemeronWorklist discovered_ephemerons;
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
Worklist<std::pair<HeapObject, HeapObjectSlot>, 64> weak_references;
Worklist<std::pair<HeapObject, Code>, 64> weak_objects_in_code;
Worklist<JSWeakRef, 64> js_weak_refs;
Worklist<WeakCell, 64> weak_cells;
Worklist<SharedFunctionInfo, 64> bytecode_flushing_candidates;
Worklist<JSFunction, 64> flushed_js_functions;
};
struct EphemeronMarking {
std::vector<HeapObject> newly_discovered;
bool newly_discovered_overflowed;
size_t newly_discovered_limit;
};
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
Marking::ObjectColor Color(HeapObject obj) {
return Marking::Color(MarkBitFrom(obj));
}
V8_INLINE bool IsImpossible(HeapObject obj) {
return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlack(HeapObject obj) {
return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsWhite(HeapObject obj) {
return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsGrey(HeapObject obj) {
return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool IsBlackOrGrey(HeapObject obj) {
return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool WhiteToGrey(HeapObject obj) {
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
}
V8_INLINE bool WhiteToBlack(HeapObject obj) {
return WhiteToGrey(obj) && GreyToBlack(obj);
}
V8_INLINE bool GreyToBlack(HeapObject obj) {
MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
MarkBit markbit = MarkBitFrom(p, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
return true;
}
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
};
// The base class for all marking visitors. It implements marking logic with
// support for bytecode flushing, embedder tracing, and weak references.
//
// Derived classes are expected to provide the following:
// - ConcreteVisitor::marking_state method,
// - ConcreteVisitor::VisitJSObjectSubclass method,
// - ConcreteVisitor::VisitLeftTrimmableArray method,
// - ConcreteVisitor::RecordSlot method,
// - ConcreteVisitor::RecordRelocSlot method,
// - ConcreteVisitor::SynchronizePageAccess method.
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(int task_id, MarkingWorklist* marking_worklist,
EmbedderTracingWorklist* embedder_worklist,
WeakObjects* weak_objects, unsigned mark_compact_epoch,
BytecodeFlushMode bytecode_flush_mode,
bool is_embedder_tracing_enabled, bool is_forced_gc)
: marking_worklist_(marking_worklist),
embedder_worklist_(embedder_worklist),
weak_objects_(weak_objects),
task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
is_forced_gc_(is_forced_gc) {}
// HeapVisitor overrides for objects that require custom visitation.
V8_INLINE bool ShouldVisit(HeapObject object) {
return concrete_visitor()->marking_state()->GreyToBlack(object);
}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
V8_INLINE int VisitFixedArray(Map map, FixedArray object);
V8_INLINE int VisitFixedDoubleArray(Map map, FixedDoubleArray object);
V8_INLINE int VisitJSApiObject(Map map, JSObject object);
V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object);
V8_INLINE int VisitJSDataView(Map map, JSDataView object);
V8_INLINE int VisitJSFunction(Map map, JSFunction object);
V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray object);
V8_INLINE int VisitJSWeakRef(Map map, JSWeakRef object);
V8_INLINE int VisitMap(Map map, Map object);
V8_INLINE int VisitSharedFunctionInfo(Map map, SharedFunctionInfo object);
V8_INLINE int VisitTransitionArray(Map map, TransitionArray object);
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) final {
VisitPointersImpl(host, start, end);
}
V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
}
protected:
ConcreteVisitor* concrete_visitor() {
return static_cast<ConcreteVisitor*>(this);
}
template <typename THeapObjectSlot>
void ProcessStrongHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object);
template <typename THeapObjectSlot>
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object);
template <typename TSlot>
V8_INLINE void VisitPointerImpl(HeapObject host, TSlot p);
template <typename TSlot>
V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end);
V8_INLINE void VisitDescriptors(DescriptorArray descriptors,
int number_of_own_descriptors);
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
MemoryChunk* chunk);
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header.
V8_INLINE void MarkDescriptorArrayBlack(HeapObject host,
DescriptorArray descriptors);
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklist* const marking_worklist_;
EmbedderTracingWorklist* const embedder_worklist_;
WeakObjects* const weak_objects_;
const int task_id_;
const unsigned mark_compact_epoch_;
const BytecodeFlushMode bytecode_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MARKING_VISITOR_H_
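
To make the MarkingState side of the contract concrete: the comment above lists what a derived visitor must provide, and MarkObject/ShouldVisit implement the classic tri-color protocol (mark a white object grey and push it onto the marking worklist; later turn it black just before visiting its fields). The following is a self-contained toy model of that protocol, not V8 code; real marking uses per-chunk mark bitmaps, atomic bit operations, and segmented worklists.

// Toy tri-color marking: colors live in a map instead of mark bitmaps and the
// worklist is a plain vector instead of a Worklist<HeapObject, 64>.
#include <iostream>
#include <unordered_map>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

class ToyMarkingState {
 public:
  // Analogue of MarkingStateBase::WhiteToGrey: only white objects succeed.
  bool WhiteToGrey(int obj) { return Transition(obj, Color::kWhite, Color::kGrey); }
  // Analogue of GreyToBlack: gates the actual visitation (ShouldVisit).
  bool GreyToBlack(int obj) { return Transition(obj, Color::kGrey, Color::kBlack); }

 private:
  bool Transition(int obj, Color from, Color to) {
    Color& color = colors_[obj];  // absent entries default to kWhite
    if (color != from) return false;
    color = to;
    return true;
  }
  std::unordered_map<int, Color> colors_;
};

int main() {
  ToyMarkingState state;
  std::vector<int> worklist;

  // Analogue of MarkObject: mark grey and push onto the marking worklist.
  auto mark_object = [&](int obj) {
    if (state.WhiteToGrey(obj)) worklist.push_back(obj);
  };

  mark_object(42);
  mark_object(42);  // no-op: the object is already grey

  // Drain loop: only objects that transition grey->black are visited.
  while (!worklist.empty()) {
    int obj = worklist.back();
    worklist.pop_back();
    if (state.GreyToBlack(obj)) std::cout << "visited " << obj << "\n";
  }
  return 0;
}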
@@ -982,7 +982,8 @@ class Map : public HeapObject {
   static const int kMaxFastProperties = 128;
 
   friend class MapUpdater;
-  friend class ConcurrentMarkingVisitor;
+  template <typename ConcreteVisitor, typename MarkingState>
+  friend class MarkingVisitorBase;
 
   OBJECT_CONSTRUCTORS(Map, HeapObject);
 };
......
@@ -18,10 +18,8 @@ namespace v8 {
 namespace internal {
 namespace heap {
 
-void PublishSegment(ConcurrentMarking::MarkingWorklist* worklist,
-                    HeapObject object) {
-  for (size_t i = 0; i <= ConcurrentMarking::MarkingWorklist::kSegmentCapacity;
-       i++) {
+void PublishSegment(MarkingWorklist* worklist, HeapObject object) {
+  for (size_t i = 0; i <= MarkingWorklist::kSegmentCapacity; i++) {
     worklist->Push(0, object);
   }
   CHECK(worklist->Pop(0, &object));
@@ -38,11 +36,11 @@ TEST(ConcurrentMarking) {
     collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
-  ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
+  MarkingWorklist shared, on_hold;
+  EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &embedder_objects, &weak_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -61,11 +59,11 @@ TEST(ConcurrentMarkingReschedule) {
    collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
-  ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
+  MarkingWorklist shared, on_hold;
+  EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &embedder_objects, &weak_objects);
   PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
   concurrent_marking->Stop(
@@ -88,11 +86,11 @@ TEST(ConcurrentMarkingPreemptAndReschedule) {
     collector->EnsureSweepingCompleted();
   }
 
-  ConcurrentMarking::MarkingWorklist shared, on_hold;
-  ConcurrentMarking::EmbedderTracingWorklist embedder_objects;
+  MarkingWorklist shared, on_hold;
+  EmbedderTracingWorklist embedder_objects;
   WeakObjects weak_objects;
   ConcurrentMarking* concurrent_marking = new ConcurrentMarking(
-      heap, &shared, &on_hold, &weak_objects, &embedder_objects);
+      heap, &shared, &on_hold, &embedder_objects, &weak_objects);
   for (int i = 0; i < 5000; i++)
     PublishSegment(&shared, ReadOnlyRoots(heap).undefined_value());
   concurrent_marking->ScheduleTasks();
......
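
One more piece of context for the WeakObjects comment in marking-visitor.h above: ephemerons (key/value pairs whose value is live only if the key is live) cannot be resolved in a single pass, which is why MarkCompactCollector::ProcessEphemeronsUntilFixpoint iterates. A self-contained toy version of that fixpoint, with integers standing in for heap objects, is sketched below; it deliberately ignores the current/next/discovered worklist split and all concurrency.

// Toy ephemeron fixpoint: mark an ephemeron's value once its key is marked,
// and repeat until no iteration makes further progress.
#include <iostream>
#include <set>
#include <vector>

struct ToyEphemeron {
  int key;
  int value;
};

int main() {
  std::set<int> marked = {1};  // object 1 is a root
  // 1 keeps 2 alive, 2 keeps 3 alive; 5 is unreachable, so 6 stays dead.
  std::vector<ToyEphemeron> ephemerons = {{1, 2}, {2, 3}, {5, 6}};

  bool changed = true;
  while (changed) {  // the fixpoint iteration
    changed = false;
    for (const ToyEphemeron& e : ephemerons) {
      if (marked.count(e.key) && !marked.count(e.value)) {
        marked.insert(e.value);  // key is reachable, so the value is too
        changed = true;
      }
    }
  }

  for (int obj : marked) std::cout << obj << " is live\n";  // prints 1, 2, 3
  return 0;
}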