Commit 98f45a40 authored by erikcorry, committed by Commit bot

Never uncommit the whole marking deque in case we can't get it back

R=hpayer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1133153003

Cr-Commit-Position: refs/heads/master@{#28822}
parent 03f4ddb1
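
The core idea of the change: instead of committing the marking deque's backing store on demand and uncommitting all of it after a marking cycle, reserve the maximum size once and only grow or shrink the committed prefix of that reservation. Because the address range itself is never released, shrinking after a GC can never leave us unable to get the deque back. Below is a minimal standalone sketch of that reserve-once/commit-a-prefix pattern, assuming POSIX mmap/mprotect semantics; the ReservedRegion class and its methods are illustrative inventions, not V8's base::VirtualMemory API.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

class ReservedRegion {
 public:
  explicit ReservedRegion(size_t max_size) : max_size_(max_size) {
    // Reserve address space only; no accessible pages are committed yet.
    base_ = mmap(nullptr, max_size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(base_ != MAP_FAILED);
  }
  ~ReservedRegion() { munmap(base_, max_size_); }

  // Grow or shrink the committed prefix to |size| bytes.
  bool SetCommitted(size_t size) {
    char* p = static_cast<char*>(base_);
    if (size > committed_) {
      // Commit more pages; this is the only step that can fail under
      // memory pressure.
      if (mprotect(p + committed_, size - committed_,
                   PROT_READ | PROT_WRITE) != 0) {
        return false;
      }
    } else if (size < committed_) {
      // Give the pages back to the OS but keep the reservation, so a later
      // grow request reuses the same address range.
      mprotect(p + size, committed_ - size, PROT_NONE);
      madvise(p + size, committed_ - size, MADV_DONTNEED);
    }
    committed_ = size;
    return true;
  }

 private:
  void* base_;
  size_t max_size_;
  size_t committed_ = 0;
};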
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1233,9 +1233,17 @@ bool Heap::PerformGarbageCollection(
   isolate_->counters()->objs_since_last_young()->Set(0);
-  // Callbacks that fire after this point might trigger nested GCs and
-  // restart incremental marking, the assertion can't be moved down.
-  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (collector != SCAVENGER) {
+    // Callbacks that fire after this point might trigger nested GCs and
+    // restart incremental marking, the assertion can't be moved down.
+    DCHECK(incremental_marking()->IsStopped());
+
+    // We finished a marking cycle. We can uncommit the marking deque until
+    // we start marking again.
+    mark_compact_collector_.marking_deque()->Uninitialize();
+    mark_compact_collector_.EnsureMarkingDequeIsCommitted(
+        MarkCompactCollector::kMinMarkingDequeSize);
+  }
 
   gc_post_processing_depth_++;
   {
@@ -1258,9 +1266,6 @@ bool Heap::PerformGarbageCollection(
     SetOldGenerationAllocationLimit(
         PromotedSpaceSizeOfObjects(),
         tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
-
-    // We finished a marking cycle. We can uncommit the marking deque until
-    // we start marking again.
-    mark_compact_collector_.UncommitMarkingDeque();
   }
 
   {
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -518,7 +518,8 @@ void IncrementalMarking::StartMarking() {
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
-  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();
+  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
 
   ActivateIncrementalWriteBarrier();
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -52,7 +52,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       migration_slots_buffer_(NULL),
       heap_(heap),
       marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
+      marking_deque_memory_committed_(0),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }
@@ -226,7 +226,8 @@ static void VerifyEvacuation(Heap* heap) {
 void MarkCompactCollector::SetUp() {
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
-  EnsureMarkingDequeIsCommittedAndInitialize(256 * KB);
+  EnsureMarkingDequeIsReserved();
+  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
 }
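
For orientation, here is a hypothetical stub tracing the deque lifecycle this commit establishes across SetUp(), StartMarking(), and the end of a full GC. DequeStub and its methods are invented for illustration; only the call sequence and the kMin/kMax sizes mirror the patch.

#include <cstddef>
#include <cstdio>

struct DequeStub {
  size_t reserved = 0, committed = 0;
  bool in_use = false;
  void Reserve(size_t max) { reserved = max; }    // like EnsureMarkingDequeIsReserved
  void Commit(size_t size) { committed = size; }  // like EnsureMarkingDequeIsCommitted
  void Initialize() { in_use = true; }            // marking starts
  void Uninitialize() { in_use = false; }         // marking cycle is done
};

int main() {
  const size_t kMin = 256 * 1024, kMax = 4 * 1024 * 1024;
  DequeStub deque;
  deque.Reserve(kMax);   // MarkCompactCollector::SetUp: reserve once, up front
  deque.Commit(kMin);    // ...but keep only the minimum committed when idle
  deque.Commit(kMax);    // IncrementalMarking::StartMarking: grow to the max
  deque.Initialize();
  // ... marking runs and a full GC completes ...
  deque.Uninitialize();  // Heap::PerformGarbageCollection, after the cycle
  deque.Commit(kMin);    // shrink back to kMin; the reservation never goes away
  printf("committed=%zu in_use=%d\n", deque.committed, deque.in_use);
  return 0;
}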
@@ -336,6 +337,7 @@ void MarkCompactCollector::CollectGarbage() {
   DCHECK(state_ == PREPARE_GC);
 
   MarkLiveObjects();
+  DCHECK(heap_->incremental_marking()->IsStopped());
 
   // ClearNonLiveReferences can deoptimize code in dependent code arrays.
@@ -2149,41 +2151,46 @@ void MarkCompactCollector::RetainMaps() {
 }
 
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
-    size_t max_size) {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+  DCHECK(!marking_deque_.in_use());
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+    marking_deque_memory_committed_ = 0;
+  }
+  if (marking_deque_memory_ == NULL) {
+    V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+  }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
   // If the marking deque is too small, we try to allocate a bigger one.
   // If that fails, make do with a smaller one.
-  for (size_t size = max_size; size >= 256 * KB; size >>= 1) {
+  CHECK(!marking_deque_.in_use());
+  for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
     base::VirtualMemory* memory = marking_deque_memory_;
-    bool is_committed = marking_deque_memory_committed_;
-
-    if (memory == NULL || memory->size() < size) {
-      // If we don't have memory or we only have small memory, then
-      // try to reserve a new one.
-      memory = new base::VirtualMemory(size);
-      is_committed = false;
-    }
-    if (is_committed) return;
-    if (memory->IsReserved() &&
-        memory->Commit(reinterpret_cast<Address>(memory->address()),
-                       memory->size(),
-                       false)) {  // Not executable.
-      if (marking_deque_memory_ != NULL && marking_deque_memory_ != memory) {
-        delete marking_deque_memory_;
-      }
-      marking_deque_memory_ = memory;
-      marking_deque_memory_committed_ = true;
-      InitializeMarkingDeque();
-      return;
-    } else {
-      // Commit failed, so we are under memory pressure. If this was the
-      // previously reserved area we tried to commit, then remove references
-      // to it before deleting it and unreserving it.
-      if (marking_deque_memory_ == memory) {
-        marking_deque_memory_ = NULL;
-        marking_deque_memory_committed_ = false;
-      }
-      delete memory;  // Will also unreserve the virtual allocation.
-    }
+    size_t currently_committed = marking_deque_memory_committed_;
+
+    if (currently_committed == size) return;
+
+    if (currently_committed > size) {
+      bool success = marking_deque_memory_->Uncommit(
+          reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+          currently_committed - size);
+      if (success) {
+        marking_deque_memory_committed_ = size;
+        return;
+      }
+      UNREACHABLE();
+    }
+
+    bool success = memory->Commit(
+        reinterpret_cast<Address>(memory->address()) + currently_committed,
+        size - currently_committed,
+        false);  // Not executable.
+    if (success) {
+      marking_deque_memory_committed_ = size;
+      return;
+    }
   }
   V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
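
The loop above degrades gracefully under memory pressure: it asks for max_size and keeps halving the request until a commit succeeds, bottoming out at kMinMarkingDequeSize. A compact sketch of just that fallback strategy; the function name, the constants, and the try_commit callback are illustrative, not V8 code.

#include <cstddef>
#include <functional>

constexpr size_t kMin = 256 * 1024;       // analogous to kMinMarkingDequeSize
constexpr size_t kMax = 4 * 1024 * 1024;  // analogous to kMaxMarkingDequeSize

// Returns the size that was successfully committed, or 0 on total failure.
size_t CommitWithFallback(size_t max_size,
                          const std::function<bool(size_t)>& try_commit) {
  for (size_t size = max_size; size >= kMin; size >>= 1) {
    if (try_commit(size)) return size;  // make do with a smaller deque
  }
  return 0;  // the caller would treat this as fatal out-of-memory
}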
@@ -2191,23 +2198,37 @@ void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize(
 
 void MarkCompactCollector::InitializeMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    Address addr = static_cast<Address>(marking_deque_memory_->address());
-    size_t size = marking_deque_memory_->size();
-    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-    marking_deque_.Initialize(addr, addr + size);
-  }
+  DCHECK(!marking_deque_.in_use());
+  DCHECK(marking_deque_memory_committed_ > 0);
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_committed_;
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
 }
 
 
-void MarkCompactCollector::UncommitMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
-}
+void MarkingDeque::Initialize(Address low, Address high) {
+  DCHECK(!in_use_);
+  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+  array_ = obj_low;
+  mask_ = base::bits::RoundDownToPowerOfTwo32(
+              static_cast<uint32_t>(obj_high - obj_low)) -
+          1;
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+  in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+  if (!aborting) {
+    DCHECK(IsEmpty());
+    DCHECK(!overflowed_);
+  }
+  DCHECK(in_use_);
+  top_ = bottom_ = 0xdecbad;
+  in_use_ = false;
+}
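
MarkingDeque::Initialize rounds the entry count down to a power of two so top_ and bottom_ can wrap with a cheap bitwise AND against mask_ rather than a modulo, and so any committed size yields a valid capacity. A small runnable illustration; the local RoundDownToPowerOfTwo32 only mimics base::bits::RoundDownToPowerOfTwo32 in spirit, and the numbers are examples.

#include <cstdint>
#include <cstdio>

static uint32_t RoundDownToPowerOfTwo32(uint32_t x) {
  // Clear the lowest set bit until only the highest remains.
  while (x & (x - 1)) x &= x - 1;
  return x;
}

int main() {
  // 256 KB committed, 8-byte slots: 32768 entries, already a power of two.
  uint32_t entries = (256 * 1024) / 8;
  uint32_t mask = RoundDownToPowerOfTwo32(entries) - 1;  // 32767
  uint32_t top = mask;      // index of the last slot
  top = (top + 1) & mask;   // wraps back to 0 with no modulo
  printf("mask=%u wrapped_top=%u\n", mask, top);
  return 0;
}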
@@ -2228,7 +2249,9 @@ void MarkCompactCollector::MarkLiveObjects() {
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
-    InitializeMarkingDeque();
+    if (marking_deque_.in_use()) {
+      marking_deque_.Uninitialize(true);
+    }
   }
 
 #ifdef DEBUG
@@ -2236,7 +2259,8 @@ void MarkCompactCollector::MarkLiveObjects() {
   state_ = MARK_LIVE_OBJECTS;
 #endif
 
-  EnsureMarkingDequeIsCommittedAndInitialize();
+  EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
 
   PrepareForCodeFlushing();
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -182,18 +182,15 @@ class Marking {
 class MarkingDeque {
  public:
   MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
-
-  void Initialize(Address low, Address high) {
-    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-    array_ = obj_low;
-    mask_ = base::bits::RoundDownToPowerOfTwo32(
-                static_cast<uint32_t>(obj_high - obj_low)) -
-            1;
-    top_ = bottom_ = 0;
-    overflowed_ = false;
-  }
+      : array_(NULL),
+        top_(0),
+        bottom_(0),
+        mask_(0),
+        overflowed_(false),
+        in_use_(false) {}
+
+  void Initialize(Address low, Address high);
+  void Uninitialize(bool aborting = false);
 
   inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -201,6 +198,8 @@ class MarkingDeque {
   bool overflowed() const { return overflowed_; }
 
+  bool in_use() const { return in_use_; }
+
   void ClearOverflowed() { overflowed_ = false; }
 
   void SetOverflowed() { overflowed_ = true; }
@@ -242,7 +241,6 @@ class MarkingDeque {
   INLINE(void UnshiftGrey(HeapObject* object)) {
     DCHECK(object->IsHeapObject());
-    DCHECK(Marking::IsGrey(Marking::MarkBitFrom(object)));
     if (IsFull()) {
       SetOverflowed();
     } else {
@@ -279,6 +277,7 @@ class MarkingDeque {
   int bottom_;
   int mask_;
   bool overflowed_;
+  bool in_use_;
 
   DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
 };
@@ -720,11 +719,20 @@ class MarkCompactCollector {
   MarkingDeque* marking_deque() { return &marking_deque_; }
 
-  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size = 4 * MB);
+  static const size_t kMaxMarkingDequeSize = 4 * MB;
+  static const size_t kMinMarkingDequeSize = 256 * KB;
 
-  void InitializeMarkingDeque();
+  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
+    if (!marking_deque_.in_use()) {
+      EnsureMarkingDequeIsCommitted(max_size);
+      InitializeMarkingDeque();
+    }
+  }
 
-  void UncommitMarkingDeque();
+  void EnsureMarkingDequeIsCommitted(size_t max_size);
+  void EnsureMarkingDequeIsReserved();
+
+  void InitializeMarkingDeque();
 
   // The following four methods can just be called after marking, when the
   // whole transitive closure is known. They must be called before sweeping
@@ -954,7 +962,7 @@ class MarkCompactCollector {
   Heap* heap_;
 
   base::VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
+  size_t marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   bool have_code_to_deoptimize_;