Fix free list node ending up on an evacuation candidate page.

This is a temporary fix that avoids compaction when incremental marking
is restarted during an old-space step. Restarting marking with compaction
enabled there could turn the page that holds the chosen free list node
into an evacuation candidate, and it could also cause several other
inconsistencies if it happens during a scavenge.

R=vegorov@chromium.org

Review URL: http://codereview.chromium.org/8228010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 312c534a
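Illustrative sketch (not part of the patch): the minimal, self-contained C++ example below only models the control flow the patch changes. Page, Collector, IncrementalMarkingSketch and the node_page parameter are invented stand-ins for illustration, not V8 APIs; the point is that restarting marking from the allocation path with PREVENT_COMPACTION keeps the page of the just-allocated node from becoming an evacuation candidate, while other callers may still pass ALLOW_COMPACTION.

#include <cassert>
#include <cstdio>

// Toy stand-ins -- NOT the real V8 types; just enough structure to show the
// hazard the patch removes.
struct Page {
  bool evacuation_candidate = false;
};

enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };

struct Collector {
  // In this toy model, starting compaction may select the page that holds the
  // free-list node the allocator is about to hand out.
  bool StartCompaction(Page* node_page) {
    node_page->evacuation_candidate = true;  // the hazardous side effect
    return true;
  }
};

struct IncrementalMarkingSketch {
  Collector* collector;
  bool is_compacting = false;

  // Mirrors the shape of the patched StartMarking(CompactionFlag flag):
  // compaction is started only when the caller explicitly allows it.
  void StartMarking(CompactionFlag flag, Page* node_page) {
    is_compacting =
        (flag == ALLOW_COMPACTION) && collector->StartCompaction(node_page);
  }
};

int main() {
  Collector collector;
  IncrementalMarkingSketch marking{&collector};

  // Page holding the node that the free-list allocation is about to return.
  Page node_page;

  // The old-space step finishes sweeping and restarts marking. With the flag,
  // compaction is prevented on this path, so the node's page cannot become an
  // evacuation candidate underneath the allocator.
  marking.StartMarking(PREVENT_COMPACTION, &node_page);
  assert(!node_page.evacuation_candidate);  // the invariant the new ASSERT checks

  // Elsewhere (e.g. a regular start of incremental marking) compaction can
  // still be allowed.
  marking.StartMarking(ALLOW_COMPACTION, &node_page);
  std::printf("compacting: %d, candidate: %d\n",
              static_cast<int>(marking.is_compacting),
              static_cast<int>(node_page.evacuation_candidate));
  return 0;
}

In the actual patch below, IncrementalMarking::Start() keeps ALLOW_COMPACTION, the restart inside IncrementalMarking::Step() uses PREVENT_COMPACTION, and the new ASSERT in FreeList::Allocate() checks the invariant after OldSpaceStep().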
@@ -411,7 +411,7 @@ void IncrementalMarking::Start() {
 
   if (heap_->old_pointer_space()->IsSweepingComplete() &&
       heap_->old_data_space()->IsSweepingComplete()) {
-    StartMarking();
+    StartMarking(ALLOW_COMPACTION);
   } else {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -436,12 +436,12 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
 }
 
 
-void IncrementalMarking::StartMarking() {
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
   }
 
-  is_compacting_ = !FLAG_never_compact &&
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
       heap_->mark_compact_collector()->StartCompaction();
 
   state_ = MARKING;
@@ -705,7 +705,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
   if (state_ == SWEEPING) {
     if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
         heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
-      StartMarking();
+      StartMarking(PREVENT_COMPACTION);
     }
   } else if (state_ == MARKING) {
     Map* filler_map = heap_->one_pointer_filler_map();
...
@@ -206,7 +206,9 @@ class IncrementalMarking {
 
   void ResetStepCounters();
 
-  void StartMarking();
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void StartMarking(CompactionFlag flag);
 
   void ActivateIncrementalWriteBarrier(PagedSpace* space);
   static void ActivateIncrementalWriteBarrier(NewSpace* space);
...
@@ -1798,6 +1798,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   owner_->heap()->incremental_marking()->OldSpaceStep(
       size_in_bytes - old_linear_size);
 
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated. We may free
...