Commit dc1906a7 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Optimize root visitor of MinorMC

The root visitor now collects marked roots into the marking worklist and
filters out objects that are not in the new space.

This reduces the average marking time of MinorMC on the Richards benchmark
from 0.08ms to 0.04ms:

baseline mark:
  len: 22
  min: 0.07
  max: 0.18
  avg: 0.0809090909091
  [0,5[: 22

mark:
  len: 22
  min: 0.03
  max: 0.13
  avg: 0.0409090909091
  [0,5[: 22
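
A minimal sketch of the new scheme, for illustration only (Heap, HeapObject,
and the worklist below are simplified stand-ins, not the real V8 classes):
the visitor filters roots down to still-white new-space objects and pushes
them straight onto the main-thread marking worklist, instead of batching raw
slots into parallel job items.

#include <cstdio>
#include <vector>

struct HeapObject {
  bool in_new_space;
  bool marked = false;  // stand-in for the white -> grey marking bit
};

struct Heap {
  bool InNewSpace(const HeapObject* obj) const { return obj->in_new_space; }
};

// Stand-in for the main-thread segment of the marking worklist.
using Worklist = std::vector<HeapObject*>;

// Mirrors the shape of MinorMarkCompactCollector::MarkRootObject below:
// only new-space objects that are still white get pushed.
void MarkRootObject(Heap* heap, Worklist* worklist, HeapObject* obj) {
  if (heap->InNewSpace(obj) && !obj->marked) {
    obj->marked = true;  // WhiteToGrey
    worklist->push_back(obj);
  }
}

int main() {
  Heap heap;
  Worklist worklist;
  HeapObject young{true};
  HeapObject old_gen{false};
  MarkRootObject(&heap, &worklist, &young);    // pushed
  MarkRootObject(&heap, &worklist, &old_gen);  // filtered: not in new space
  MarkRootObject(&heap, &worklist, &young);    // skipped: already grey
  std::printf("worklist size: %zu\n", worklist.size());  // prints 1
  return 0;
}

The win presumably comes from rejecting old-space roots before they ever
enter the worklist and from dropping the per-batch item allocations of the
old seeding visitor (see the removed RootMarkingVisitorSeedOnly below).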

Bug: chromium:651354
Change-Id: I979e2f5ba331f88029b69bab23978f7fcadb7024
Reviewed-on: https://chromium-review.googlesource.com/1055490
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53128}
parent be2f237d
@@ -380,6 +380,16 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject* obj) {
   }
 }
 
+#ifdef ENABLE_MINOR_MC
+void MinorMarkCompactCollector::MarkRootObject(HeapObject* obj) {
+  if (heap_->InNewSpace(obj) && non_atomic_marking_state_.WhiteToGrey(obj)) {
+    worklist_->Push(kMainThread, obj);
+  }
+}
+#endif
+
 void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
   if (marking_state()->WhiteToGrey(obj)) {
     marking_worklist()->Push(obj);

@@ -330,53 +330,6 @@ using MarkCompactMarkingVisitor =
 
 namespace {
 
-// This root visitor walks all roots and creates items bundling objects that
-// are then processed later on. Slots have to be dereferenced as they could
-// live on the native (C++) stack, which requires filtering out the indirection.
-template <class BatchedItem>
-class RootMarkingVisitorSeedOnly : public RootVisitor {
- public:
-  explicit RootMarkingVisitorSeedOnly(ItemParallelJob* job) : job_(job) {
-    buffered_objects_.reserve(kBufferSize);
-  }
-
-  void VisitRootPointer(Root root, const char* description,
-                        Object** p) override {
-    if (!(*p)->IsHeapObject()) return;
-    AddObject(*p);
-  }
-
-  void VisitRootPointers(Root root, const char* description, Object** start,
-                         Object** end) override {
-    for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-      AddObject(*p);
-    }
-  }
-
-  void FlushObjects() {
-    job_->AddItem(new BatchedItem(std::move(buffered_objects_)));
-    // Moving leaves the container in a valid but unspecified state. Reusing the
-    // container requires a call without precondition that resets the state.
-    buffered_objects_.clear();
-    buffered_objects_.reserve(kBufferSize);
-  }
-
- private:
-  // Bundling several objects together in items avoids issues with allocating
-  // and deallocating items; both are operations that are performed on the main
-  // thread.
-  static const int kBufferSize = 128;
-
-  void AddObject(Object* object) {
-    buffered_objects_.push_back(object);
-    if (buffered_objects_.size() == kBufferSize) FlushObjects();
-  }
-
-  ItemParallelJob* job_;
-  std::vector<Object*> buffered_objects_;
-};
-
 int NumberOfAvailableCores() {
   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
   // This number of cores should be greater than zero and never change.

@@ -3668,35 +3621,25 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
  public:
   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
-      : collector_(collector),
-        marking_state_(collector_->non_atomic_marking_state()) {}
+      : collector_(collector) {}
 
-  void VisitRootPointer(Root root, const char* description,
-                        Object** p) override {
+  void VisitRootPointer(Root root, const char* description, Object** p) final {
     MarkObjectByPointer(p);
   }
 
   void VisitRootPointers(Root root, const char* description, Object** start,
-                         Object** end) override {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+                         Object** end) final {
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(p);
+    }
   }
 
  private:
-  void MarkObjectByPointer(Object** p) {
+  V8_INLINE void MarkObjectByPointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
-    HeapObject* object = HeapObject::cast(*p);
-    if (!collector_->heap()->InNewSpace(object)) return;
-    if (marking_state_->WhiteToGrey(object)) {
-      collector_->main_marking_visitor()->Visit(object);
-      collector_->ProcessMarkingWorklist();
-    }
+    collector_->MarkRootObject(HeapObject::cast(*p));
   }
 
-  MinorMarkCompactCollector* collector_;
-  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+  MinorMarkCompactCollector* const collector_;
 };
 
 void MinorMarkCompactCollector::CollectGarbage() {

@@ -4010,24 +3953,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
 };
 
-class BatchedRootMarkingItem : public MarkingItem {
- public:
-  explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
-      : objects_(objects) {}
-  virtual ~BatchedRootMarkingItem() {}
-
-  void Process(YoungGenerationMarkingTask* task) override {
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
-                 "BatchedRootMarkingItem::Process");
-    for (Object* object : objects_) {
-      task->MarkObject(object);
-    }
-  }
-
- private:
-  std::vector<Object*> objects_;
-};
-
 class PageMarkingItem : public MarkingItem {
  public:
   explicit PageMarkingItem(MemoryChunk* chunk,

@@ -4133,7 +4058,8 @@ class GlobalHandlesMarkingItem : public MarkingItem {
   size_t end_;
 };
 
-void MinorMarkCompactCollector::MarkRootSetInParallel() {
+void MinorMarkCompactCollector::MarkRootSetInParallel(
+    RootMarkingVisitor* root_visitor) {
   base::AtomicNumber<intptr_t> slots;
   {
     ItemParallelJob job(isolate()->cancelable_task_manager(),

@@ -4142,10 +4068,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
     // Seed the root set (roots + old->new set).
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
-      // Create batches of roots.
-      RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
-          &job);
-      heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+      heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
       // Create batches of global handles.
       SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
                                                   &job);

@@ -4154,8 +4077,6 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
           heap(), [&job, &slots](MemoryChunk* chunk) {
             job.AddItem(new PageMarkingItem(chunk, &slots));
           });
-      // Flush any remaining objects in the seeding visitor.
-      root_seed_visitor.FlushObjects();
     }
 
     // Add tasks and run in parallel.

@@ -4182,7 +4103,7 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
     RootMarkingVisitor root_visitor(this);
 
-    MarkRootSetInParallel();
+    MarkRootSetInParallel(&root_visitor);
 
     // Mark rest on the main thread.
     {

@@ -260,6 +260,7 @@ class MarkCompactCollectorBase {
   inline Isolate* isolate() { return heap()->isolate(); }
 
  protected:
+  static const int kMainThread = 0;
 
   explicit MarkCompactCollectorBase(Heap* heap)
       : heap_(heap), old_to_new_slots_(0) {}

@@ -439,8 +440,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   using MarkingState = MajorNonAtomicMarkingState;
 #endif  // V8_CONCURRENT_MARKING
   using NonAtomicMarkingState = MajorNonAtomicMarkingState;
-  static const int kMainThread = 0;
-
   // Wrapper for the shared and bailout worklists.
   class MarkingWorklist {
    public:

@@ -931,7 +930,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
   }
 
   void MarkLiveObjects() override;
-  void MarkRootSetInParallel();
+  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
+  V8_INLINE void MarkRootObject(HeapObject* obj);
   void ProcessMarkingWorklist() override;
   void ClearNonLiveReferences() override;