Commit ad815a7b authored by ulan's avatar ulan Committed by Commit bot

[heap] Refactor marking deque.

This patch moves management of marking deque backing store into the
MarkingDeque class, which will simplify unmapping of backing store in
concurrent task.

BUG=

Review-Url: https://codereview.chromium.org/2439063002
Cr-Commit-Position: refs/heads/master@{#40523}
parent 231c8ac0
...@@ -1457,11 +1457,7 @@ void Heap::MarkCompactEpilogue() { ...@@ -1457,11 +1457,7 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces(); PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped()); DCHECK(incremental_marking()->IsStopped());
// We finished a marking cycle. We can uncommit the marking deque until mark_compact_collector()->marking_deque()->StopUsing();
// we start marking again.
mark_compact_collector()->marking_deque()->Uninitialize();
mark_compact_collector()->EnsureMarkingDequeIsCommitted(
MarkCompactCollector::kMinMarkingDequeSize);
} }
......
...@@ -538,8 +538,7 @@ void IncrementalMarking::StartMarking() { ...@@ -538,8 +538,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode); PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize( heap_->mark_compact_collector()->marking_deque()->StartUsing();
MarkCompactCollector::kMaxMarkingDequeSize);
ActivateIncrementalWriteBarrier(); ActivateIncrementalWriteBarrier();
......
...@@ -58,8 +58,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) ...@@ -58,8 +58,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false), compacting_(false),
black_allocation_(false), black_allocation_(false),
have_code_to_deoptimize_(false), have_code_to_deoptimize_(false),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(nullptr), code_flusher_(nullptr),
sweeper_(heap) { sweeper_(heap) {
} }
...@@ -240,9 +238,7 @@ void MarkCompactCollector::SetUp() { ...@@ -240,9 +238,7 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
marking_deque()->SetUp();
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
if (FLAG_flush_code) { if (FLAG_flush_code) {
code_flusher_ = new CodeFlusher(isolate()); code_flusher_ = new CodeFlusher(isolate());
...@@ -255,7 +251,7 @@ void MarkCompactCollector::SetUp() { ...@@ -255,7 +251,7 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() { void MarkCompactCollector::TearDown() {
AbortCompaction(); AbortCompaction();
delete marking_deque_memory_; marking_deque()->TearDown();
delete code_flusher_; delete code_flusher_;
} }
...@@ -783,6 +779,10 @@ void MarkCompactCollector::Prepare() { ...@@ -783,6 +779,10 @@ void MarkCompactCollector::Prepare() {
EnsureSweepingCompleted(); EnsureSweepingCompleted();
} }
if (heap()->incremental_marking()->IsSweeping()) {
heap()->incremental_marking()->Stop();
}
// If concurrent unmapping tasks are still running, we should wait for // If concurrent unmapping tasks are still running, we should wait for
// them here. // them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted(); heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
...@@ -799,6 +799,7 @@ void MarkCompactCollector::Prepare() { ...@@ -799,6 +799,7 @@ void MarkCompactCollector::Prepare() {
if (heap_->UsingEmbedderHeapTracer()) { if (heap_->UsingEmbedderHeapTracer()) {
heap_->embedder_heap_tracer()->AbortTracing(); heap_->embedder_heap_tracer()->AbortTracing();
} }
marking_deque()->Clear();
was_marked_incrementally_ = false; was_marked_incrementally_ = false;
} }
...@@ -2106,85 +2107,70 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { ...@@ -2106,85 +2107,70 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
} }
} }
void MarkingDeque::SetUp() {
void MarkCompactCollector::EnsureMarkingDequeIsReserved() { backing_store_ = new base::VirtualMemory(kMaxSize);
DCHECK(!marking_deque()->in_use()); backing_store_committed_size_ = 0;
if (marking_deque_memory_ == NULL) { if (backing_store_ == nullptr) {
marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize); V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
marking_deque_memory_committed_ = 0;
}
if (marking_deque_memory_ == NULL) {
V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
} }
} }
void MarkingDeque::TearDown() {
  // Releases the reserved virtual memory region; deleting a null pointer
  // is a no-op, so TearDown is safe even if SetUp was never called.
  delete backing_store_;
}
void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) { void MarkingDeque::StartUsing() {
// If the marking deque is too small, we try to allocate a bigger one. if (in_use_) {
// If that fails, make do with a smaller one. // This can happen in mark-compact GC if the incremental marker already
CHECK(!marking_deque()->in_use()); // started using the marking deque.
for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) { return;
base::VirtualMemory* memory = marking_deque_memory_;
size_t currently_committed = marking_deque_memory_committed_;
if (currently_committed == size) return;
if (currently_committed > size) {
bool success = marking_deque_memory_->Uncommit(
reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
currently_committed - size);
if (success) {
marking_deque_memory_committed_ = size;
return;
}
UNREACHABLE();
}
bool success = memory->Commit(
reinterpret_cast<Address>(memory->address()) + currently_committed,
size - currently_committed,
false); // Not executable.
if (success) {
marking_deque_memory_committed_ = size;
return;
}
} }
V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted"); in_use_ = true;
EnsureCommitted();
array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
size_t size = FLAG_force_marking_deque_overflows
? 64 * kPointerSize
: backing_store_committed_size_;
DCHECK(
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
mask_ = static_cast<int>((size / kPointerSize) - 1);
top_ = bottom_ = 0;
overflowed_ = false;
} }
void MarkingDeque::StopUsing() {
void MarkCompactCollector::InitializeMarkingDeque() { DCHECK(IsEmpty());
DCHECK(!marking_deque()->in_use()); DCHECK(!overflowed_);
DCHECK(marking_deque_memory_committed_ > 0); top_ = bottom_ = mask_ = 0;
Address addr = static_cast<Address>(marking_deque_memory_->address()); Uncommit();
size_t size = marking_deque_memory_committed_; in_use_ = false;
if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
marking_deque()->Initialize(addr, addr + size);
} }
void MarkingDeque::Clear() {
void MarkingDeque::Initialize(Address low, Address high) { DCHECK(in_use_);
DCHECK(!in_use_);
HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
array_ = obj_low;
mask_ = base::bits::RoundDownToPowerOfTwo32(
static_cast<uint32_t>(obj_high - obj_low)) -
1;
top_ = bottom_ = 0; top_ = bottom_ = 0;
overflowed_ = false; overflowed_ = false;
in_use_ = true;
} }
void MarkingDeque::Uncommit() {
  DCHECK(in_use_);
  // Decommit the whole committed range; the reservation itself survives
  // and can be re-committed by EnsureCommitted().
  bool success = backing_store_->Uncommit(backing_store_->address(),
                                          backing_store_committed_size_);
  backing_store_committed_size_ = 0;
  CHECK(success);
}
void MarkingDeque::EnsureCommitted() {
DCHECK(in_use_);
if (backing_store_committed_size_ > 0) return;
void MarkingDeque::Uninitialize(bool aborting) { for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
if (!aborting) { if (backing_store_->Commit(backing_store_->address(), size, false)) {
DCHECK(IsEmpty()); backing_store_committed_size_ = size;
DCHECK(!overflowed_); break;
}
}
if (backing_store_committed_size_ == 0) {
V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
} }
DCHECK(in_use_);
top_ = bottom_ = 0xdecbad;
in_use_ = false;
} }
class MarkCompactCollector::ObjectStatsVisitor class MarkCompactCollector::ObjectStatsVisitor
...@@ -2260,11 +2246,7 @@ void MarkCompactCollector::MarkLiveObjects() { ...@@ -2260,11 +2246,7 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) { if (was_marked_incrementally_) {
incremental_marking->Finalize(); incremental_marking->Finalize();
} else { } else {
// Abort any pending incremental activities e.g. incremental sweeping. CHECK(incremental_marking->IsStopped());
incremental_marking->Stop();
if (marking_deque()->in_use()) {
marking_deque()->Uninitialize(true);
}
} }
} }
...@@ -2273,8 +2255,7 @@ void MarkCompactCollector::MarkLiveObjects() { ...@@ -2273,8 +2255,7 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS; state_ = MARK_LIVE_OBJECTS;
#endif #endif
EnsureMarkingDequeIsCommittedAndInitialize( marking_deque()->StartUsing();
MarkCompactCollector::kMaxMarkingDequeSize);
{ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH); TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
......
...@@ -53,15 +53,21 @@ class ObjectMarking : public AllStatic { ...@@ -53,15 +53,21 @@ class ObjectMarking : public AllStatic {
class MarkingDeque { class MarkingDeque {
public: public:
MarkingDeque() MarkingDeque()
: array_(NULL), : backing_store_(nullptr),
backing_store_committed_size_(0),
array_(nullptr),
top_(0), top_(0),
bottom_(0), bottom_(0),
mask_(0), mask_(0),
overflowed_(false), overflowed_(false),
in_use_(false) {} in_use_(false) {}
void Initialize(Address low, Address high); void SetUp();
void Uninitialize(bool aborting = false); void TearDown();
void StartUsing();
void StopUsing();
void Clear();
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; } inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
...@@ -69,8 +75,6 @@ class MarkingDeque { ...@@ -69,8 +75,6 @@ class MarkingDeque {
bool overflowed() const { return overflowed_; } bool overflowed() const { return overflowed_; }
bool in_use() const { return in_use_; }
void ClearOverflowed() { overflowed_ = false; } void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; } void SetOverflowed() { overflowed_ = true; }
...@@ -118,6 +122,14 @@ class MarkingDeque { ...@@ -118,6 +122,14 @@ class MarkingDeque {
void set_top(int top) { top_ = top; } void set_top(int top) { top_ = top; }
private: private:
static const size_t kMaxSize = 4 * MB;
static const size_t kMinSize = 256 * KB;
void EnsureCommitted();
void Uncommit();
base::VirtualMemory* backing_store_;
size_t backing_store_committed_size_;
HeapObject** array_; HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
// empty when top_ == bottom_. It is full when top_ + 1 == bottom // empty when top_ == bottom_. It is full when top_ + 1 == bottom
...@@ -470,21 +482,6 @@ class MarkCompactCollector { ...@@ -470,21 +482,6 @@ class MarkCompactCollector {
MarkingDeque* marking_deque() { return &marking_deque_; } MarkingDeque* marking_deque() { return &marking_deque_; }
static const size_t kMaxMarkingDequeSize = 4 * MB;
static const size_t kMinMarkingDequeSize = 256 * KB;
void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
if (!marking_deque()->in_use()) {
EnsureMarkingDequeIsCommitted(max_size);
InitializeMarkingDeque();
}
}
void EnsureMarkingDequeIsCommitted(size_t max_size);
void EnsureMarkingDequeIsReserved();
void InitializeMarkingDeque();
Sweeper& sweeper() { return sweeper_; } Sweeper& sweeper() { return sweeper_; }
private: private:
...@@ -708,8 +705,6 @@ class MarkCompactCollector { ...@@ -708,8 +705,6 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_; bool have_code_to_deoptimize_;
base::VirtualMemory* marking_deque_memory_;
size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_; MarkingDeque marking_deque_;
CodeFlusher* code_flusher_; CodeFlusher* code_flusher_;
......
...@@ -51,13 +51,9 @@ using v8::Just; ...@@ -51,13 +51,9 @@ using v8::Just;
TEST(MarkingDeque) { TEST(MarkingDeque) {
CcTest::InitializeVM(); CcTest::InitializeVM();
int mem_size = 20 * kPointerSize;
byte* mem = NewArray<byte>(20*kPointerSize);
Address low = reinterpret_cast<Address>(mem);
Address high = low + mem_size;
MarkingDeque s; MarkingDeque s;
s.Initialize(low, high); s.SetUp();
s.StartUsing();
Address original_address = reinterpret_cast<Address>(&s); Address original_address = reinterpret_cast<Address>(&s);
Address current_address = original_address; Address current_address = original_address;
while (!s.IsFull()) { while (!s.IsFull()) {
...@@ -72,7 +68,8 @@ TEST(MarkingDeque) { ...@@ -72,7 +68,8 @@ TEST(MarkingDeque) {
} }
CHECK_EQ(original_address, current_address); CHECK_EQ(original_address, current_address);
DeleteArray(mem); s.StopUsing();
s.TearDown();
} }
TEST(Promotion) { TEST(Promotion) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment