Commit 7a21a70c authored by mlippautz's avatar mlippautz Committed by Commit bot

[heap] Thread through GC flags in memory reducer and incremental marking.

BUG=chromium:520607
LOG=N

Review URL: https://codereview.chromium.org/1302273002

Cr-Commit-Position: refs/heads/master@{#30287}
parent bb43d6c0
...@@ -740,7 +740,7 @@ void Heap::PreprocessStackTraces() { ...@@ -740,7 +740,7 @@ void Heap::PreprocessStackTraces() {
void Heap::HandleGCRequest() { void Heap::HandleGCRequest() {
if (incremental_marking()->request_type() == if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) { IncrementalMarking::COMPLETE_MARKING) {
CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt", CollectAllGarbage(current_gc_flags(), "GC interrupt",
incremental_marking()->CallbackFlags()); incremental_marking()->CallbackFlags());
return; return;
} }
...@@ -4748,10 +4748,14 @@ void Heap::ReduceNewSpaceSize() { ...@@ -4748,10 +4748,14 @@ void Heap::ReduceNewSpaceSize() {
// TODO(ulan): Unify this constant with the similar constant in // TODO(ulan): Unify this constant with the similar constant in
// GCIdleTimeHandler once the change is merged to 4.5. // GCIdleTimeHandler once the change is merged to 4.5.
static const size_t kLowAllocationThroughput = 1000; static const size_t kLowAllocationThroughput = 1000;
size_t allocation_throughput = const size_t allocation_throughput =
tracer()->CurrentAllocationThroughputInBytesPerMillisecond(); tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
if (FLAG_predictable || allocation_throughput == 0) return;
if (allocation_throughput < kLowAllocationThroughput) { if (FLAG_predictable) return;
if (ShouldReduceMemory() ||
((allocation_throughput != 0) &&
(allocation_throughput < kLowAllocationThroughput))) {
new_space_.Shrink(); new_space_.Shrink();
UncommitFromSpace(); UncommitFromSpace();
} }
...@@ -4766,7 +4770,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) { ...@@ -4766,7 +4770,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
OverApproximateWeakClosure(comment); OverApproximateWeakClosure(comment);
} else if (incremental_marking()->IsComplete() || } else if (incremental_marking()->IsComplete() ||
(mark_compact_collector_.marking_deque()->IsEmpty())) { (mark_compact_collector_.marking_deque()->IsEmpty())) {
CollectAllGarbage(kNoGCFlags, comment); CollectAllGarbage(current_gc_flags(), comment);
} }
} }
...@@ -4788,7 +4792,8 @@ bool Heap::TryFinalizeIdleIncrementalMarking( ...@@ -4788,7 +4792,8 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
static_cast<size_t>(idle_time_in_ms), size_of_objects, static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) { final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); CollectAllGarbage(current_gc_flags(),
"idle notification: finalize incremental");
return true; return true;
} }
return false; return false;
......
...@@ -2812,7 +2812,8 @@ HEAP_TEST(GCFlags) { ...@@ -2812,7 +2812,8 @@ HEAP_TEST(GCFlags) {
marking->Start(Heap::kReduceMemoryFootprintMask); marking->Start(Heap::kReduceMemoryFootprintMask);
CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask); CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
heap->Scavenge(); heap->CollectGarbage(NEW_SPACE);
// NewSpace scavenges should not overwrite the flags.
CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask); CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask); heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment