Commit 169283de authored by hpayer@chromium.org

Perform incremental marking step after free-list allocation and clean-up...

Perform incremental marking step after free-list allocation and clean-up incremental marking start condition.

BUG=

Review URL: https://codereview.chromium.org/14634007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14564 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 3cd73ebc
......@@ -490,10 +490,16 @@ bool IncrementalMarking::WorthActivating() {
// debug tests run with incremental marking and some without.
static const intptr_t kActivationThreshold = 0;
#endif
// Only start incremental marking in a safe state: 1) when expose GC is
// deactivated, 2) when incremental marking is turned on, 3) when we are
// currently not in a GC, and 4) when we are currently not serializing
// or deserializing the heap.
return !FLAG_expose_gc &&
FLAG_incremental_marking &&
FLAG_incremental_marking_steps &&
heap_->gc_state() == Heap::NOT_IN_GC &&
!Serializer::enabled() &&
heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
......@@ -561,7 +567,7 @@ void IncrementalMarking::UncommitMarkingDeque() {
}
void IncrementalMarking::Start() {
void IncrementalMarking::Start(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
......@@ -575,7 +581,7 @@ void IncrementalMarking::Start() {
ResetStepCounters();
if (heap_->IsSweepingComplete()) {
StartMarking(ALLOW_COMPACTION);
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start sweeping.\n");
......@@ -866,17 +872,9 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
// Only start incremental marking in a save state: 1) when we are not in
// a GC, 2) when we turned-on incremental marking, 3) when we are
// currently not serializing or deserializing the heap.
if (heap_->gc_state() != Heap::NOT_IN_GC ||
!FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
Serializer::enabled() ||
!heap_->isolate()->IsInitialized()) {
return;
}
Start();
// TODO(hpayer): Let's play safe for now, but compaction should be
// in principle possible.
Start(PREVENT_COMPACTION);
} else {
Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
......
......@@ -75,7 +75,9 @@ class IncrementalMarking {
bool WorthActivating();
void Start();
enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
void Start(CompactionFlag flag = ALLOW_COMPACTION);
void Stop();
......@@ -223,8 +225,6 @@ class IncrementalMarking {
void ResetStepCounters();
enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
void StartMarking(CompactionFlag flag);
void ActivateIncrementalWriteBarrier(PagedSpace* space);
......
......@@ -2304,14 +2304,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Don't free list allocate if there is linear space available.
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) return NULL;
int bytes_left = new_node_size - size_in_bytes;
ASSERT(bytes_left >= 0);
int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
......@@ -2321,6 +2313,16 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->heap()->incremental_marking()->OldSpaceStep(
size_in_bytes - old_linear_size);
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
owner_->SetTop(NULL, NULL);
return NULL;
}
int bytes_left = new_node_size - size_in_bytes;
ASSERT(bytes_left >= 0);
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment