Commit cd8d915c authored by erik.corry@gmail.com's avatar erik.corry@gmail.com

Clean up the marking speed heuristics. This reduces the
max heap size on 64 bit from ca. 300Mbytes to ca. 200Mbytes
on Ulan's splay variant.  On 32 bit not much change.
Review URL: http://codereview.chromium.org/8494012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9906 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2a1f08a1
......@@ -96,6 +96,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
bytes_scanned_ -= obj_size;
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
......
......@@ -747,6 +747,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
bytes_scanned_ += bytes_to_process;
double start = 0;
......@@ -757,6 +758,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
if (state_ == SWEEPING) {
if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
......@@ -808,37 +810,66 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
bool speed_up = false;
if (old_generation_space_available_at_start_of_incremental_ < 10 * MB ||
SpaceLeftInOldSpace() <
old_generation_space_available_at_start_of_incremental_ >> 1) {
// Half of the space that was available is gone while we were
// incrementally marking.
if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
if (FLAG_trace_gc) {
PrintF("Speed up marking after %d steps\n",
static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
}
speed_up = true;
old_generation_space_available_at_start_of_incremental_ =
SpaceLeftInOldSpace();
}
if (heap_->PromotedTotalSize() >
old_generation_space_used_at_start_of_incremental_ << 1) {
// Size of old space doubled while we were incrementally marking.
bool space_left_is_very_small =
(old_generation_space_available_at_start_of_incremental_ < 10 * MB);
bool only_1_nth_of_space_that_was_available_still_left =
(SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
old_generation_space_available_at_start_of_incremental_);
if (space_left_is_very_small ||
only_1_nth_of_space_that_was_available_still_left) {
if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
speed_up = true;
old_generation_space_used_at_start_of_incremental_ =
heap_->PromotedTotalSize();
}
if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 &&
allocation_marking_factor_ < kMaxAllocationMarkingFactor) {
bool size_of_old_space_multiplied_by_n_during_marking =
(heap_->PromotedTotalSize() >
(allocation_marking_factor_ + 1) *
old_generation_space_used_at_start_of_incremental_);
if (size_of_old_space_multiplied_by_n_during_marking) {
speed_up = true;
if (FLAG_trace_gc) {
PrintF("Speed up marking because of heap size increase\n");
}
}
intptr_t promoted_during_marking = heap_->PromotedTotalSize()
- old_generation_space_used_at_start_of_incremental_;
intptr_t delay = allocation_marking_factor_ * MB;
intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
// We try to scan at least twice the speed that we are allocating.
if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
if (FLAG_trace_gc) {
PrintF("Speed up marking because marker was not keeping up\n");
}
speed_up = true;
}
if (speed_up) {
if (state_ != MARKING) {
if (FLAG_trace_gc) {
PrintF("Postponing speeding up marking until marking starts\n");
}
} else {
allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
allocation_marking_factor_ =
static_cast<int>(allocation_marking_factor_ * 1.3);
Min(kMaxAllocationMarkingFactor,
static_cast<intptr_t>(allocation_marking_factor_ * 1.3));
if (FLAG_trace_gc) {
PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
}
}
}
if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
double end = OS::TimeCurrentMillis();
......@@ -862,6 +893,7 @@ void IncrementalMarking::ResetStepCounters() {
steps_took_since_last_gc_ = 0;
bytes_rescanned_ = 0;
allocation_marking_factor_ = kInitialAllocationMarkingFactor;
bytes_scanned_ = 0;
}
......
......@@ -96,7 +96,7 @@ class IncrementalMarking {
static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
// This is how much we increase the marking/allocating factor by.
static const intptr_t kAllocationMarkingFactorSpeedup = 2;
static const intptr_t kMaxAllocationMarkingFactor = 1000000000;
static const intptr_t kMaxAllocationMarkingFactor = 1000;
void OldSpaceStep(intptr_t allocated) {
Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
......@@ -262,6 +262,7 @@ class IncrementalMarking {
int64_t bytes_rescanned_;
bool should_hurry_;
int allocation_marking_factor_;
intptr_t bytes_scanned_;
intptr_t allocated_;
int no_marking_scope_depth_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment