Commit 2c9fc184 authored by mlippautz, committed by Commit bot

[heap] Refactor MC and introduce MarkCompactCollectorBase

- Dispatch evacuation of a page to its concrete evacuator (see the sketch below the commit metadata).
- Create an MC base class to accommodate shared state.

BUG=chromium:651354

Review-Url: https://codereview.chromium.org/2828323004
Cr-Commit-Position: refs/heads/master@{#44807}
parent 852a20b0
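
For orientation, here is a minimal sketch of the hierarchy this commit introduces, distilled from the header diff below. This is not V8 code: bodies are elided, and the stand-in main() is illustrative only. MarkCompactCollectorBase holds the shared Heap* state and declares the collector lifecycle as pure-virtual hooks that both the minor and the full collector override.

    class Heap;  // opaque here; stands in for v8::internal::Heap

    // Base class for minor and full MC collectors (shared state and helpers).
    class MarkCompactCollectorBase {
     public:
      virtual ~MarkCompactCollectorBase() {}
      virtual void SetUp() = 0;
      virtual void TearDown() = 0;
      virtual void CollectGarbage() = 0;

     protected:
      explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
      virtual void MarkLiveObjects() = 0;
      Heap* heap() const { return heap_; }

      Heap* heap_;
    };

    // Collector for the young generation only.
    class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
     public:
      explicit MinorMarkCompactCollector(Heap* heap)
          : MarkCompactCollectorBase(heap) {}
      void SetUp() override {}
      void TearDown() override {}
      void CollectGarbage() override { MarkLiveObjects(); }

     private:
      void MarkLiveObjects() override { /* mark the young generation */ }
    };

    // Collector for young and old generation.
    class MarkCompactCollector final : public MarkCompactCollectorBase {
     public:
      explicit MarkCompactCollector(Heap* heap)
          : MarkCompactCollectorBase(heap) {}
      void SetUp() override {}
      void TearDown() override {}
      void CollectGarbage() override { MarkLiveObjects(); }

     private:
      void MarkLiveObjects() override { /* mark the full heap */ }
    };

    int main() {
      MarkCompactCollector full(nullptr);  // a real Heap* in V8
      full.SetUp();
      full.CollectGarbage();
      full.TearDown();
    }
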
......@@ -6404,5 +6404,23 @@ int Heap::GetStaticVisitorIdForMap(Map* map) {
return StaticVisitorBase::GetVisitorId(map);
}
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
return "NEW_SPACE";
case OLD_SPACE:
return "OLD_SPACE";
case CODE_SPACE:
return "CODE_SPACE";
case MAP_SPACE:
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
default:
UNREACHABLE();
}
return NULL;
}
} // namespace internal
} // namespace v8
......@@ -2655,6 +2655,8 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal
} // namespace v8
......
......@@ -283,12 +283,39 @@ class FullEvacuationVerifier : public EvacuationVerifier {
#endif // VERIFY_HEAP
// =============================================================================
// MarkCompactCollector
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(
int pages, intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - #cores
const double kTargetCompactionTimeInMs = .5;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
kTargetCompactionTimeInMs);
} else {
tasks = pages;
}
const int tasks_capped_pages = Min(pages, tasks);
return Min(available_cores, tasks_capped_pages);
}
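
To make the task-count heuristic above concrete, here is a standalone restatement with made-up inputs. It is illustrative only: in V8 the speed comes from heap()->tracer() and the core count from the platform, whereas here both are plain parameters.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes,
                                        double speed_bytes_per_ms,
                                        int available_cores) {
      const double kTargetCompactionTimeInMs = .5;
      // Aim to finish compaction within the target time given the profiled
      // speed; without a profile, fall back to one task per page.
      int tasks = (speed_bytes_per_ms > 0)
                      ? 1 + static_cast<int>(live_bytes / speed_bytes_per_ms /
                                             kTargetCompactionTimeInMs)
                      : pages;
      // Cap by the number of evacuation pages, then by the number of cores.
      return std::min(available_cores, std::min(pages, tasks));
    }

    int main() {
      // 8 evacuation pages, 4MB live, 2MB/ms profiled speed, 4 cores:
      // 4MB / (2MB/ms) = 2ms of work; 2ms / 0.5ms target -> 4 (+1) = 5 tasks,
      // capped at min(8 pages, 5) = 5, then at min(4 cores, 5) = 4.
      std::printf("%d\n", NumberOfParallelCompactionTasks(
                              8, 4 * 1024 * 1024, 2 * 1024 * 1024.0, 4));
    }
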
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: // NOLINT
heap_(heap),
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
......@@ -616,25 +643,6 @@ bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
return "NEW_SPACE";
case OLD_SPACE:
return "OLD_SPACE";
case CODE_SPACE:
return "CODE_SPACE";
case MAP_SPACE:
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
default:
UNREACHABLE();
}
return NULL;
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
......@@ -3140,7 +3148,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() {}
virtual bool EvacuatePage(Page* page, const MarkingState& state) = 0;
bool EvacuatePage(Page* page, const MarkingState& state);
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
......@@ -3152,6 +3160,8 @@ class Evacuator : public Malloced {
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
virtual bool RawEvacuatePage(Page* page, const MarkingState& state) = 0;
inline Heap* heap() { return heap_; }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
......@@ -3178,6 +3188,34 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
bool Evacuator::EvacuatePage(Page* page, const MarkingState& state) {
bool success = false;
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = state.live_bytes();
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
success = RawEvacuatePage(page, state);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
PrintIsolate(
heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%" V8PRIdPTR " time=%f page_promotion_qualifies=%d\n",
static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
evacuation_time,
saved_live_bytes > Evacuator::PageEvacuationThreshold());
}
return success;
}
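
The hunk above is the crux of the first commit-message bullet: EvacuatePage() is now a non-virtual template method on the base Evacuator that owns the shared timing, tracing, and progress reporting, and dispatches the page-specific work to the pure-virtual RawEvacuatePage(). A reduced sketch of the pattern (not V8 code; Page is a stand-in and the tracing is simplified):

    #include <chrono>
    #include <cstdio>

    struct Page {};  // stand-in for v8::internal::Page

    class Evacuator {
     public:
      virtual ~Evacuator() = default;

      // Non-virtual entry point: shared timing and reporting live here.
      bool EvacuatePage(Page* page) {
        auto start = std::chrono::steady_clock::now();
        bool success = RawEvacuatePage(page);  // dispatch to concrete evacuator
        std::chrono::duration<double, std::milli> elapsed =
            std::chrono::steady_clock::now() - start;
        std::printf("evacuation: success=%d time=%fms\n",
                    static_cast<int>(success), elapsed.count());
        return success;
      }

     protected:
      // Page-specific evacuation, overridden per collector.
      virtual bool RawEvacuatePage(Page* page) = 0;
    };

    class FullEvacuator final : public Evacuator {
     protected:
      bool RawEvacuatePage(Page* page) override {
        // Visit black objects, update array buffers, etc. (elided).
        return true;
      }
    };

    int main() {
      Page page;
      FullEvacuator evacuator;
      return evacuator.EvacuatePage(&page) ? 0 : 1;
    }
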
void Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
......@@ -3201,23 +3239,17 @@ class FullEvacuator : public Evacuator {
FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
: Evacuator(heap, record_visitor) {}
bool EvacuatePage(Page* page, const MarkingState& state) override;
protected:
bool RawEvacuatePage(Page* page, const MarkingState& state) override;
};
bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
bool FullEvacuator::RawEvacuatePage(Page* page, const MarkingState& state) {
bool success = false;
DCHECK(page->SweepingDone());
intptr_t saved_live_bytes = state.live_bytes();
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
LiveObjectVisitor object_visitor;
switch (ComputeEvacuationMode(page)) {
case kObjectsNewToOld:
success =
object_visitor.VisitBlackObjects(page, state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
success = object_visitor.VisitBlackObjects(
page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
DCHECK(success);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
......@@ -3241,9 +3273,8 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
// ArrayBufferTracker will be updated during sweeping.
break;
case kObjectsOldToOld:
success =
object_visitor.VisitBlackObjects(page, state, &old_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
success = object_visitor.VisitBlackObjects(
page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
if (!success) {
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in the first place.
......@@ -3265,52 +3296,9 @@ bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
}
break;
}
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
PrintIsolate(heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d "
"page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%" V8PRIdPTR " time=%f\n",
static_cast<void*>(this), static_cast<void*>(page),
page->InNewSpace(),
page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
page->Contains(heap()->new_space()->age_mark()),
saved_live_bytes, evacuation_time);
}
return success;
}
int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - #cores
const double kTargetCompactionTimeInMs = .5;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
kTargetCompactionTimeInMs);
} else {
tasks = pages;
}
const int tasks_capped_pages = Min(pages, tasks);
return Min(available_cores, tasks_capped_pages);
}
class EvacuationJobTraits {
public:
typedef int* PerPageData; // Pointer to number of aborted pages.
......
......@@ -17,20 +17,13 @@
namespace v8 {
namespace internal {
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
// Forward declarations.
class CodeFlusher;
class HeapObjectVisitor;
class MarkCompactCollector;
class MinorMarkCompactCollector;
class MarkingVisitor;
class ThreadLocalTop;
class ObjectMarking : public AllStatic {
public:
......@@ -310,10 +303,6 @@ class CodeFlusher {
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
// Defined in isolate.h.
class ThreadLocalTop;
class MarkBitCellIterator BASE_EMBEDDED {
public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
......@@ -422,41 +411,58 @@ class LiveObjectVisitor BASE_EMBEDDED {
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
class MinorMarkCompactCollector {
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
public:
explicit MinorMarkCompactCollector(Heap* heap)
: heap_(heap), marking_deque_(heap) {}
virtual ~MarkCompactCollectorBase() {}
virtual void SetUp() = 0;
virtual void TearDown() = 0;
virtual void CollectGarbage() = 0;
void SetUp();
void TearDown();
inline Heap* heap() const { return heap_; }
inline Isolate* isolate() { return heap()->isolate(); }
void CollectGarbage();
protected:
explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
inline Heap* heap() const { return heap_; }
virtual void MarkLiveObjects() = 0;
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
Heap* heap_;
};
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
explicit MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap), marking_deque_(heap) {}
void SetUp() override;
void TearDown() override;
void CollectGarbage() override;
private:
class RootMarkingVisitor;
inline Isolate* isolate() { return heap()->isolate(); }
inline MarkingDeque* marking_deque() { return &marking_deque_; }
V8_INLINE void MarkObject(HeapObject* obj);
V8_INLINE void PushBlack(HeapObject* obj);
SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
void MarkLiveObjects();
void MarkLiveObjects() override;
void ProcessMarkingDeque();
void EmptyMarkingDeque();
Heap* heap_;
MarkingDeque marking_deque_;
friend class StaticYoungGenerationMarkingVisitor;
};
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
class RootMarkingVisitor;
......@@ -543,12 +549,10 @@ class MarkCompactCollector {
static void Initialize();
static SlotCallbackResult CheckAndMarkObject(Heap* heap,
Address slot_address);
void SetUp();
void TearDown();
void SetUp() override;
void TearDown() override;
// Performs a global garbage collection.
void CollectGarbage() override;
void CollectEvacuationCandidates(PagedSpace* space);
......@@ -558,9 +562,6 @@ class MarkCompactCollector {
// choosing spaces to compact.
void Prepare();
// Performs a global garbage collection.
void CollectGarbage();
bool StartCompaction();
void AbortCompaction();
......@@ -573,7 +574,6 @@ class MarkCompactCollector {
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
inline Heap* heap() const { return heap_; }
inline Isolate* isolate() const;
CodeFlusher* code_flusher() { return code_flusher_; }
......@@ -659,23 +659,6 @@ class MarkCompactCollector {
// Finishes GC, performs heap verification if enabled.
void Finish();
// -----------------------------------------------------------------------
// Phase 1: Marking live objects.
//
// Before: The heap has been prepared for garbage collection by
// MarkCompactCollector::Prepare() and is otherwise in its
// normal state.
//
// After: Live objects are marked and non-live objects are unmarked.
friend class CodeMarkingVisitor;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class StaticYoungGenerationMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
......@@ -683,7 +666,7 @@ class MarkCompactCollector {
void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
......@@ -793,26 +776,12 @@ class MarkCompactCollector {
void EvacuateEpilogue();
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
void EvacuateNewSpaceAndCandidates();
void UpdatePointersAfterEvacuation();
void ReleaseEvacuationCandidates();
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
friend class UnmarkObjectVisitor;
static void UnmarkObject(HeapObject* obj);
#endif
Heap* heap_;
base::Semaphore page_parallel_job_semaphore_;
#ifdef DEBUG
......@@ -854,11 +823,17 @@ class MarkCompactCollector {
Sweeper sweeper_;
friend class CodeMarkingVisitor;
friend class Heap;
friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class StaticYoungGenerationMarkingVisitor;
friend class StoreBuffer;
};
class EvacuationScope BASE_EMBEDDED {
public:
explicit EvacuationScope(MarkCompactCollector* collector)
......@@ -872,7 +847,6 @@ class EvacuationScope BASE_EMBEDDED {
MarkCompactCollector* collector_;
};
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal
} // namespace v8
......