Commit 2c9fc184 authored by mlippautz's avatar mlippautz Committed by Commit bot

[heap] Refactor MC and introduce MarkCompactCollectorBase

- Dispatch evacuation of a page to its concrete evacuator.
- Create MC base class to accommodate shared state.

BUG=chromium:651354

Review-Url: https://codereview.chromium.org/2828323004
Cr-Commit-Position: refs/heads/master@{#44807}
parent 852a20b0
...@@ -6404,5 +6404,23 @@ int Heap::GetStaticVisitorIdForMap(Map* map) { ...@@ -6404,5 +6404,23 @@ int Heap::GetStaticVisitorIdForMap(Map* map) {
return StaticVisitorBase::GetVisitorId(map); return StaticVisitorBase::GetVisitorId(map);
} }
// Returns the printable identifier for |space|, used in GC tracing output.
// Dies via UNREACHABLE() on any space without a name mapping.
const char* AllocationSpaceName(AllocationSpace space) {
  if (space == NEW_SPACE) return "NEW_SPACE";
  if (space == OLD_SPACE) return "OLD_SPACE";
  if (space == CODE_SPACE) return "CODE_SPACE";
  if (space == MAP_SPACE) return "MAP_SPACE";
  if (space == LO_SPACE) return "LO_SPACE";
  UNREACHABLE();
  // Unreachable; keeps compilers that cannot see through UNREACHABLE() happy.
  return NULL;
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -2655,6 +2655,8 @@ class AllocationObserver { ...@@ -2655,6 +2655,8 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver); DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
}; };
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -283,12 +283,39 @@ class FullEvacuationVerifier : public EvacuationVerifier { ...@@ -283,12 +283,39 @@ class FullEvacuationVerifier : public EvacuationVerifier {
#endif // VERIFY_HEAP #endif // VERIFY_HEAP
// ============================================================================= // =============================================================================
// MarkCompactCollector // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// ============================================================================= // =============================================================================
// Computes the number of compaction tasks (main thread included) to use for
// evacuating |pages| candidate pages carrying |live_bytes| of live memory.
// The estimate aims at a fixed per-cycle compaction time based on the
// profiled compaction speed, and is capped by both the number of evacuation
// pages and the number of available cores.
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(
    int pages, intptr_t live_bytes) {
  if (!FLAG_parallel_compaction) return 1;
  const double kTargetCompactionTimeInMs = .5;
  const double speed =
      heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
  const int cores = Max(
      1, static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
  int wanted_tasks;
  if (speed > 0) {
    // One task plus however many are needed to finish within the target time
    // at the observed throughput.
    wanted_tasks = 1 + static_cast<int>(live_bytes / speed /
                                        kTargetCompactionTimeInMs);
  } else {
    // No speed profiled yet; start from one task per page.
    wanted_tasks = pages;
  }
  // Never use more tasks than there are pages, nor more than there are cores.
  return Min(cores, Min(pages, wanted_tasks));
}
MarkCompactCollector::MarkCompactCollector(Heap* heap) MarkCompactCollector::MarkCompactCollector(Heap* heap)
: // NOLINT : MarkCompactCollectorBase(heap),
heap_(heap),
page_parallel_job_semaphore_(0), page_parallel_job_semaphore_(0),
#ifdef DEBUG #ifdef DEBUG
state_(IDLE), state_(IDLE),
...@@ -616,25 +643,6 @@ bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() { ...@@ -616,25 +643,6 @@ bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0; return num_sweeping_tasks_.Value() != 0;
} }
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
return "NEW_SPACE";
case OLD_SPACE:
return "OLD_SPACE";
case CODE_SPACE:
return "CODE_SPACE";
case MAP_SPACE:
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
default:
UNREACHABLE();
}
return NULL;
}
void MarkCompactCollector::ComputeEvacuationHeuristics( void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent, size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) { size_t* max_evacuated_bytes) {
...@@ -3140,7 +3148,7 @@ class Evacuator : public Malloced { ...@@ -3140,7 +3148,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() {} virtual ~Evacuator() {}
virtual bool EvacuatePage(Page* page, const MarkingState& state) = 0; bool EvacuatePage(Page* page, const MarkingState& state);
// Merge back locally cached info sequentially. Note that this method needs // Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread. // to be called from the main thread.
...@@ -3152,6 +3160,8 @@ class Evacuator : public Malloced { ...@@ -3152,6 +3160,8 @@ class Evacuator : public Malloced {
protected: protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256; static const int kInitialLocalPretenuringFeedbackCapacity = 256;
virtual bool RawEvacuatePage(Page* page, const MarkingState& state) = 0;
inline Heap* heap() { return heap_; } inline Heap* heap() { return heap_; }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
...@@ -3178,6 +3188,34 @@ class Evacuator : public Malloced { ...@@ -3178,6 +3188,34 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_; intptr_t bytes_compacted_;
}; };
// Evacuates a single page, timing the work and recording compaction progress.
// The actual copying is dispatched to the concrete evacuator through
// RawEvacuatePage(); this wrapper handles accounting and optional tracing.
bool Evacuator::EvacuatePage(Page* page, const MarkingState& state) {
  bool evacuated = false;
  DCHECK(page->SweepingDone());
  // Snapshot the live byte count before evacuation touches the mark bits.
  const intptr_t live_bytes_before = state.live_bytes();
  double time_ms = 0.0;
  {
    // Allocation must not fail while objects are being moved off the page.
    AlwaysAllocateScope always_allocate(heap()->isolate());
    TimedScope timed_scope(&time_ms);
    evacuated = RawEvacuatePage(page, state);
  }
  ReportCompactionProgress(time_ms, live_bytes_before);
  if (FLAG_trace_evacuation) {
    PrintIsolate(
        heap()->isolate(),
        "evacuation[%p]: page=%p new_space=%d "
        "page_evacuation=%d executable=%d contains_age_mark=%d "
        "live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n",
        static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
        page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
            page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
        page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
        page->Contains(heap()->new_space()->age_mark()), live_bytes_before,
        time_ms, live_bytes_before > Evacuator::PageEvacuationThreshold());
  }
  return evacuated;
}
void Evacuator::Finalize() { void Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace( heap()->code_space()->MergeCompactionSpace(
...@@ -3201,23 +3239,17 @@ class FullEvacuator : public Evacuator { ...@@ -3201,23 +3239,17 @@ class FullEvacuator : public Evacuator {
FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor) FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
: Evacuator(heap, record_visitor) {} : Evacuator(heap, record_visitor) {}
bool EvacuatePage(Page* page, const MarkingState& state) override; protected:
bool RawEvacuatePage(Page* page, const MarkingState& state) override;
}; };
bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) { bool FullEvacuator::RawEvacuatePage(Page* page, const MarkingState& state) {
bool success = false; bool success = false;
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = state.live_bytes();
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
LiveObjectVisitor object_visitor; LiveObjectVisitor object_visitor;
switch (ComputeEvacuationMode(page)) { switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld: case kObjectsNewToOld:
success = success = object_visitor.VisitBlackObjects(
object_visitor.VisitBlackObjects(page, state, &new_space_visitor_, page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
LiveObjectVisitor::kClearMarkbits);
DCHECK(success); DCHECK(success);
ArrayBufferTracker::ProcessBuffers( ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers); page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
...@@ -3241,9 +3273,8 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) { ...@@ -3241,9 +3273,8 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
// ArrayBufferTracker will be updated during sweeping. // ArrayBufferTracker will be updated during sweeping.
break; break;
case kObjectsOldToOld: case kObjectsOldToOld:
success = success = object_visitor.VisitBlackObjects(
object_visitor.VisitBlackObjects(page, state, &old_space_visitor_, page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
LiveObjectVisitor::kClearMarkbits);
if (!success) { if (!success) {
// Aborted compaction page. We have to record slots here, since we // Aborted compaction page. We have to record slots here, since we
// might not have recorded them in first place. // might not have recorded them in first place.
...@@ -3265,52 +3296,9 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) { ...@@ -3265,52 +3296,9 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
} }
break; break;
} }
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
PrintIsolate(heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%" V8PRIdPTR " time=%f\n",
static_cast<void*>(this), static_cast<void*>(page),
page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap()->new_space()->age_mark()),
saved_live_bytes, evacuation_time);
}
return success; return success;
} }
int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - #cores
const double kTargetCompactionTimeInMs = .5;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
kTargetCompactionTimeInMs);
} else {
tasks = pages;
}
const int tasks_capped_pages = Min(pages, tasks);
return Min(available_cores, tasks_capped_pages);
}
class EvacuationJobTraits { class EvacuationJobTraits {
public: public:
typedef int* PerPageData; // Pointer to number of aborted pages. typedef int* PerPageData; // Pointer to number of aborted pages.
......
...@@ -17,20 +17,13 @@ ...@@ -17,20 +17,13 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
// Forward declarations. // Forward declarations.
class CodeFlusher; class CodeFlusher;
class HeapObjectVisitor; class HeapObjectVisitor;
class MarkCompactCollector; class MarkCompactCollector;
class MinorMarkCompactCollector; class MinorMarkCompactCollector;
class MarkingVisitor; class MarkingVisitor;
class ThreadLocalTop;
class ObjectMarking : public AllStatic { class ObjectMarking : public AllStatic {
public: public:
...@@ -310,10 +303,6 @@ class CodeFlusher { ...@@ -310,10 +303,6 @@ class CodeFlusher {
DISALLOW_COPY_AND_ASSIGN(CodeFlusher); DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
}; };
// Defined in isolate.h.
class ThreadLocalTop;
class MarkBitCellIterator BASE_EMBEDDED { class MarkBitCellIterator BASE_EMBEDDED {
public: public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) { MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
...@@ -422,41 +411,58 @@ class LiveObjectVisitor BASE_EMBEDDED { ...@@ -422,41 +411,58 @@ class LiveObjectVisitor BASE_EMBEDDED {
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD }; enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
class MinorMarkCompactCollector { // Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
public: public:
explicit MinorMarkCompactCollector(Heap* heap) virtual ~MarkCompactCollectorBase() {}
: heap_(heap), marking_deque_(heap) {} virtual void SetUp() = 0;
virtual void TearDown() = 0;
virtual void CollectGarbage() = 0;
void SetUp(); inline Heap* heap() const { return heap_; }
void TearDown(); inline Isolate* isolate() { return heap()->isolate(); }
void CollectGarbage(); protected:
explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
inline Heap* heap() const { return heap_; } virtual void MarkLiveObjects() = 0;
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
Heap* heap_;
};
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
explicit MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap), marking_deque_(heap) {}
void SetUp() override;
void TearDown() override;
void CollectGarbage() override;
private: private:
class RootMarkingVisitor; class RootMarkingVisitor;
inline Isolate* isolate() { return heap()->isolate(); }
inline MarkingDeque* marking_deque() { return &marking_deque_; } inline MarkingDeque* marking_deque() { return &marking_deque_; }
V8_INLINE void MarkObject(HeapObject* obj); V8_INLINE void MarkObject(HeapObject* obj);
V8_INLINE void PushBlack(HeapObject* obj); V8_INLINE void PushBlack(HeapObject* obj);
SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address); SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects(); void MarkLiveObjects() override;
void ProcessMarkingDeque(); void ProcessMarkingDeque();
void EmptyMarkingDeque(); void EmptyMarkingDeque();
Heap* heap_;
MarkingDeque marking_deque_; MarkingDeque marking_deque_;
friend class StaticYoungGenerationMarkingVisitor; friend class StaticYoungGenerationMarkingVisitor;
}; };
// ------------------------------------------------------------------------- // Collector for young and old generation.
// Mark-Compact collector class MarkCompactCollector final : public MarkCompactCollectorBase {
class MarkCompactCollector {
public: public:
class RootMarkingVisitor; class RootMarkingVisitor;
...@@ -543,12 +549,10 @@ class MarkCompactCollector { ...@@ -543,12 +549,10 @@ class MarkCompactCollector {
static void Initialize(); static void Initialize();
static SlotCallbackResult CheckAndMarkObject(Heap* heap, void SetUp() override;
Address slot_address); void TearDown() override;
// Performs a global garbage collection.
void SetUp(); void CollectGarbage() override;
void TearDown();
void CollectEvacuationCandidates(PagedSpace* space); void CollectEvacuationCandidates(PagedSpace* space);
...@@ -558,9 +562,6 @@ class MarkCompactCollector { ...@@ -558,9 +562,6 @@ class MarkCompactCollector {
// choosing spaces to compact. // choosing spaces to compact.
void Prepare(); void Prepare();
// Performs a global garbage collection.
void CollectGarbage();
bool StartCompaction(); bool StartCompaction();
void AbortCompaction(); void AbortCompaction();
...@@ -573,7 +574,6 @@ class MarkCompactCollector { ...@@ -573,7 +574,6 @@ class MarkCompactCollector {
static const uint32_t kSingleFreeEncoding = 0; static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1; static const uint32_t kMultiFreeEncoding = 1;
inline Heap* heap() const { return heap_; }
inline Isolate* isolate() const; inline Isolate* isolate() const;
CodeFlusher* code_flusher() { return code_flusher_; } CodeFlusher* code_flusher() { return code_flusher_; }
...@@ -659,23 +659,6 @@ class MarkCompactCollector { ...@@ -659,23 +659,6 @@ class MarkCompactCollector {
// Finishes GC, performs heap verification if enabled. // Finishes GC, performs heap verification if enabled.
void Finish(); void Finish();
// -----------------------------------------------------------------------
// Phase 1: Marking live objects.
//
// Before: The heap has been prepared for garbage collection by
// MarkCompactCollector::Prepare() and is otherwise in its
// normal state.
//
// After: Live objects are marked and non-live objects are unmarked.
friend class CodeMarkingVisitor;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class StaticYoungGenerationMarkingVisitor;
// Mark code objects that are active on the stack to prevent them // Mark code objects that are active on the stack to prevent them
// from being flushed. // from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top); void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
...@@ -683,7 +666,7 @@ class MarkCompactCollector { ...@@ -683,7 +666,7 @@ class MarkCompactCollector {
void PrepareForCodeFlushing(); void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots. // Marking operations for objects reachable from roots.
void MarkLiveObjects(); void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes. // Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted. // Note that this assumes live bytes have not yet been counted.
...@@ -793,26 +776,12 @@ class MarkCompactCollector { ...@@ -793,26 +776,12 @@ class MarkCompactCollector {
void EvacuateEpilogue(); void EvacuateEpilogue();
void EvacuatePagesInParallel(); void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
void EvacuateNewSpaceAndCandidates(); void EvacuateNewSpaceAndCandidates();
void UpdatePointersAfterEvacuation(); void UpdatePointersAfterEvacuation();
void ReleaseEvacuationCandidates(); void ReleaseEvacuationCandidates();
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
friend class UnmarkObjectVisitor;
static void UnmarkObject(HeapObject* obj);
#endif
Heap* heap_;
base::Semaphore page_parallel_job_semaphore_; base::Semaphore page_parallel_job_semaphore_;
#ifdef DEBUG #ifdef DEBUG
...@@ -854,11 +823,17 @@ class MarkCompactCollector { ...@@ -854,11 +823,17 @@ class MarkCompactCollector {
Sweeper sweeper_; Sweeper sweeper_;
friend class CodeMarkingVisitor;
friend class Heap; friend class Heap;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class StaticYoungGenerationMarkingVisitor;
friend class StoreBuffer; friend class StoreBuffer;
}; };
class EvacuationScope BASE_EMBEDDED { class EvacuationScope BASE_EMBEDDED {
public: public:
explicit EvacuationScope(MarkCompactCollector* collector) explicit EvacuationScope(MarkCompactCollector* collector)
...@@ -872,7 +847,6 @@ class EvacuationScope BASE_EMBEDDED { ...@@ -872,7 +847,6 @@ class EvacuationScope BASE_EMBEDDED {
MarkCompactCollector* collector_; MarkCompactCollector* collector_;
}; };
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment