Commit 98f45a40 authored by erikcorry, committed by Commit bot

Never uncommit the whole marking deque in case we can't get it back

R=hpayer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1133153003

Cr-Commit-Position: refs/heads/master@{#28822}
parent 03f4ddb1
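The patch replaces the old all-or-nothing commit/uncommit of the marking deque backing store with a scheme that reserves the maximum size once and afterwards only grows or shrinks the committed prefix, never dropping below a minimum. The standalone sketch below is not V8 code: `VirtualRegion`, `MarkingDequeMemory` and `EnsureCommitted` are hypothetical stand-ins for `base::VirtualMemory` and the collector fields touched in the hunks that follow; only the kMin/kMax constants and the halving retry loop mirror the patch itself.

// Simplified sketch of the reserve-once / commit-a-prefix scheme.
#include <cassert>
#include <cstddef>

static const size_t KB = 1024;
static const size_t MB = 1024 * KB;
static const size_t kMinMarkingDequeSize = 256 * KB;
static const size_t kMaxMarkingDequeSize = 4 * MB;

// Hypothetical stand-in for base::VirtualMemory: a reserved range whose
// pages can be committed or uncommitted; commits may fail under pressure.
class VirtualRegion {
 public:
  explicit VirtualRegion(size_t reserved) : reserved_(reserved) {}
  bool Commit(size_t offset, size_t bytes) { return offset + bytes <= reserved_; }
  bool Uncommit(size_t offset, size_t bytes) { return offset + bytes <= reserved_; }
 private:
  size_t reserved_;
};

class MarkingDequeMemory {
 public:
  MarkingDequeMemory() : region_(kMaxMarkingDequeSize), committed_(0) {}

  // Grow or shrink the committed prefix towards max_size, halving the
  // request on commit failure but never going below the minimum.
  bool EnsureCommitted(size_t max_size) {
    for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
      if (committed_ == size) return true;
      if (committed_ > size) {
        // Shrinking only releases pages; the patch treats failure here as
        // unreachable, since nothing new is being acquired.
        if (!region_.Uncommit(size, committed_ - size)) return false;
        committed_ = size;
        return true;
      }
      // Growing may fail under memory pressure; retry with half the size.
      if (region_.Commit(committed_, size - committed_)) {
        committed_ = size;
        return true;
      }
    }
    return false;  // V8 treats this as a fatal OOM.
  }

  size_t committed() const { return committed_; }

 private:
  VirtualRegion region_;
  size_t committed_;
};

int main() {
  MarkingDequeMemory deque;
  deque.EnsureCommitted(kMinMarkingDequeSize);  // at collector setup
  deque.EnsureCommitted(kMaxMarkingDequeSize);  // when marking starts
  deque.EnsureCommitted(kMinMarkingDequeSize);  // after the marking cycle
  assert(deque.committed() == kMinMarkingDequeSize);
  return 0;
}

The minimum is the point of the commit title: memory committed for the deque at setup is never handed back in full, so a later commit failure can only mean marking runs with a smaller deque, never with no deque at all.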
@@ -1233,9 +1233,17 @@ bool Heap::PerformGarbageCollection(
   isolate_->counters()->objs_since_last_young()->Set(0);
-  // Callbacks that fire after this point might trigger nested GCs and
-  // restart incremental marking, the assertion can't be moved down.
-  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (collector != SCAVENGER) {
+    // Callbacks that fire after this point might trigger nested GCs and
+    // restart incremental marking, the assertion can't be moved down.
+    DCHECK(incremental_marking()->IsStopped());
+    // We finished a marking cycle. We can uncommit the marking deque until
+    // we start marking again.
+    mark_compact_collector_.marking_deque()->Uninitialize();
+    mark_compact_collector_.EnsureMarkingDequeIsCommitted(
+        MarkCompactCollector::kMinMarkingDequeSize);
+  }
   gc_post_processing_depth_++;
   {
@@ -1258,9 +1266,6 @@ bool Heap::PerformGarbageCollection(
     SetOldGenerationAllocationLimit(
         PromotedSpaceSizeOfObjects(),
         tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
-    // We finished a marking cycle. We can uncommit the marking deque until
-    // we start marking again.
-    mark_compact_collector_.UncommitMarkingDeque();
   }
   {
...
@@ -518,7 +518,8 @@ void IncrementalMarking::StartMarking() {
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();
+  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
   ActivateIncrementalWriteBarrier();
...
@@ -52,7 +52,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       migration_slots_buffer_(NULL),
       heap_(heap),
       marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
+      marking_deque_memory_committed_(0),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }
@@ -226,7 +226,8 @@ static void VerifyEvacuation(Heap* heap) {
 void MarkCompactCollector::SetUp() {
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
-  EnsureMarkingDequeIsCommittedAndInitialize(256 * KB);
+  EnsureMarkingDequeIsReserved();
+  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
 }
@@ -336,6 +337,7 @@ void MarkCompactCollector::CollectGarbage() {
   DCHECK(state_ == PREPARE_GC);
   MarkLiveObjects();
+
   DCHECK(heap_->incremental_marking()->IsStopped());
   // ClearNonLiveReferences can deoptimize code in dependent code arrays.
@@ -2149,41 +2151,46 @@ void MarkCompactCollector::RetainMaps()
 }
 
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
-    size_t max_size) {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+  DCHECK(!marking_deque_.in_use());
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+    marking_deque_memory_committed_ = 0;
+  }
+  if (marking_deque_memory_ == NULL) {
+    V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+  }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
   // If the marking deque is too small, we try to allocate a bigger one.
   // If that fails, make do with a smaller one.
-  for (size_t size = max_size; size >= 256 * KB; size >>= 1) {
+  CHECK(!marking_deque_.in_use());
+  for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
     base::VirtualMemory* memory = marking_deque_memory_;
-    bool is_committed = marking_deque_memory_committed_;
-    if (memory == NULL || memory->size() < size) {
-      // If we don't have memory or we only have small memory, then
-      // try to reserve a new one.
-      memory = new base::VirtualMemory(size);
-      is_committed = false;
-    }
-    if (is_committed) return;
-    if (memory->IsReserved() &&
-        memory->Commit(reinterpret_cast<Address>(memory->address()),
-                       memory->size(),
-                       false)) {  // Not executable.
-      if (marking_deque_memory_ != NULL && marking_deque_memory_ != memory) {
-        delete marking_deque_memory_;
-      }
-      marking_deque_memory_ = memory;
-      marking_deque_memory_committed_ = true;
-      InitializeMarkingDeque();
+    size_t currently_committed = marking_deque_memory_committed_;
+    if (currently_committed == size) return;
+    if (currently_committed > size) {
+      bool success = marking_deque_memory_->Uncommit(
+          reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+          currently_committed - size);
+      if (success) {
+        marking_deque_memory_committed_ = size;
+        return;
+      }
+      UNREACHABLE();
+    }
+    bool success = memory->Commit(
+        reinterpret_cast<Address>(memory->address()) + currently_committed,
+        size - currently_committed,
+        false);  // Not executable.
+    if (success) {
+      marking_deque_memory_committed_ = size;
       return;
-    } else {
-      // Commit failed, so we are under memory pressure. If this was the
-      // previously reserved area we tried to commit, then remove references
-      // to it before deleting it and unreserving it.
-      if (marking_deque_memory_ == memory) {
-        marking_deque_memory_ = NULL;
-        marking_deque_memory_committed_ = false;
-      }
-      delete memory;  // Will also unreserve the virtual allocation.
     }
   }
   V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
@@ -2191,23 +2198,37 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
 void MarkCompactCollector::InitializeMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    Address addr = static_cast<Address>(marking_deque_memory_->address());
-    size_t size = marking_deque_memory_->size();
-    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-    marking_deque_.Initialize(addr, addr + size);
-  }
+  DCHECK(!marking_deque_.in_use());
+  DCHECK(marking_deque_memory_committed_ > 0);
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_committed_;
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
 }
 
-void MarkCompactCollector::UncommitMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
+
+void MarkingDeque::Initialize(Address low, Address high) {
+  DCHECK(!in_use_);
+  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+  array_ = obj_low;
+  mask_ = base::bits::RoundDownToPowerOfTwo32(
+              static_cast<uint32_t>(obj_high - obj_low)) -
+          1;
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+  in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+  if (!aborting) {
+    DCHECK(IsEmpty());
+    DCHECK(!overflowed_);
+  }
+  DCHECK(in_use_);
+  top_ = bottom_ = 0xdecbad;
+  in_use_ = false;
 }
@@ -2228,7 +2249,9 @@ void MarkCompactCollector::MarkLiveObjects() {
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
-    InitializeMarkingDeque();
+    if (marking_deque_.in_use()) {
+      marking_deque_.Uninitialize(true);
+    }
   }
 
 #ifdef DEBUG
@@ -2236,7 +2259,8 @@ void MarkCompactCollector::MarkLiveObjects() {
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  EnsureMarkingDequeIsCommittedAndInitialize();
+  EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
   PrepareForCodeFlushing();
...
@@ -182,18 +182,15 @@ class Marking {
 class MarkingDeque {
  public:
   MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+      : array_(NULL),
+        top_(0),
+        bottom_(0),
+        mask_(0),
+        overflowed_(false),
+        in_use_(false) {}
 
-  void Initialize(Address low, Address high) {
-    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-    array_ = obj_low;
-    mask_ = base::bits::RoundDownToPowerOfTwo32(
-                static_cast<uint32_t>(obj_high - obj_low)) -
-            1;
-    top_ = bottom_ = 0;
-    overflowed_ = false;
-  }
+  void Initialize(Address low, Address high);
+  void Uninitialize(bool aborting = false);
 
   inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -201,6 +198,8 @@ class MarkingDeque {
   bool overflowed() const { return overflowed_; }
 
+  bool in_use() const { return in_use_; }
+
   void ClearOverflowed() { overflowed_ = false; }
 
   void SetOverflowed() { overflowed_ = true; }
@@ -242,7 +241,6 @@ class MarkingDeque {
   INLINE(void UnshiftGrey(HeapObject* object)) {
     DCHECK(object->IsHeapObject());
-    DCHECK(Marking::IsGrey(Marking::MarkBitFrom(object)));
     if (IsFull()) {
       SetOverflowed();
     } else {
@@ -279,6 +277,7 @@ class MarkingDeque {
   int bottom_;
   int mask_;
   bool overflowed_;
+  bool in_use_;
 
   DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
 };
@@ -720,11 +719,20 @@ class MarkCompactCollector {
   MarkingDeque* marking_deque() { return &marking_deque_; }
 
-  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size = 4 * MB);
+  static const size_t kMaxMarkingDequeSize = 4 * MB;
+  static const size_t kMinMarkingDequeSize = 256 * KB;
 
-  void InitializeMarkingDeque();
+  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
+    if (!marking_deque_.in_use()) {
+      EnsureMarkingDequeIsCommitted(max_size);
+      InitializeMarkingDeque();
+    }
+  }
+
+  void EnsureMarkingDequeIsCommitted(size_t max_size);
+  void EnsureMarkingDequeIsReserved();
 
-  void UncommitMarkingDeque();
+  void InitializeMarkingDeque();
 
   // The following four methods can just be called after marking, when the
   // whole transitive closure is known. They must be called before sweeping
@@ -954,7 +962,7 @@ class MarkCompactCollector {
   Heap* heap_;
   base::VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
+  size_t marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   bool have_code_to_deoptimize_;
...