Commit 26991892 authored by machenbach's avatar machenbach Committed by Commit bot

Revert of Replace reduce-memory mode in idle notification with delayed...

Revert of Replace reduce-memory mode in idle notification with delayed clean-up GC. (patchset #17 id:320001 of https://codereview.chromium.org/1218863002/)

Reason for revert:
[Sheriff] Looks like it blocks the roll (bisected). Speculative revert.
https://codereview.chromium.org/1210293003/

Original issue's description:
> Replace reduce-memory mode in idle notification with delayed clean-up GC.
>
> BUG=490559
> LOG=NO
>
> Committed: https://crrev.com/0ecd9e1bd2c6b519d4e7285f46cb7e844bc2235c
> Cr-Commit-Position: refs/heads/master@{#29451}

TBR=hpayer@chromium.org,ulan@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=490559

Review URL: https://codereview.chromium.org/1226703002

Cr-Commit-Position: refs/heads/master@{#29470}
parent 435b3c87
......@@ -855,8 +855,6 @@ source_set("v8_base") {
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
......
......@@ -50,6 +50,7 @@ void GCIdleTimeHandler::HeapState::Print() {
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
......@@ -194,15 +195,70 @@ bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
GCIdleTimeAction GCIdleTimeHandler::NothingOrDone() {
if (idle_times_which_made_no_progress_ >= kMaxNoProgressIdleTimes) {
if (idle_times_which_made_no_progress_per_mode_ >=
kMaxNoProgressIdleTimesPerMode) {
return GCIdleTimeAction::Done();
} else {
idle_times_which_made_no_progress_++;
idle_times_which_made_no_progress_per_mode_++;
return GCIdleTimeAction::Nothing();
}
}
// The idle time handler has three modes and transitions between them
// as shown in the diagram:
//
// kReduceLatency -----> kReduceMemory -----> kDone
// ^ ^ | |
// | | | |
// | +------------------+ |
// | |
// +----------------------------------------+
//
// In kReduceLatency mode the handler only starts incremental marking
// if can_start_incremental_marking is false.
// In kReduceMemory mode the handler can force a new GC cycle by starting
// incremental marking even if can_start_incremental_marking is false. It can
// cause at most X idle GCs.
// In kDone mode the idle time handler does nothing.
//
// The initial mode is kReduceLatency.
//
// kReduceLatency => kReduceMemory transition happens if there were Y
// consecutive long idle notifications without any mutator GC. This is our
// notion of "mutator is idle".
//
// kReduceMemory => kDone transition happens after X idle GCs.
//
// kReduceMemory => kReduceLatency transition happens if N mutator GCs
// were performed meaning that the mutator is active.
//
// kDone => kReduceLatency transition happens if there were M mutator GCs or
// context was disposed.
//
// X = kMaxIdleMarkCompacts
// Y = kLongIdleNotificationsBeforeMutatorIsIdle
// N = #(idle GCs)
// M = kGCsBeforeMutatorIsActive
GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
HeapState heap_state) {
Mode next_mode = NextMode(heap_state);
if (next_mode != mode_) {
mode_ = next_mode;
ResetCounters();
}
UpdateCounters(idle_time_in_ms);
if (mode_ == kDone) {
return GCIdleTimeAction::Done();
} else {
return Action(idle_time_in_ms, heap_state, mode_ == kReduceMemory);
}
}
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
......@@ -211,18 +267,26 @@ GCIdleTimeAction GCIdleTimeHandler::NothingOrDone() {
// we do nothing until the context disposal rate becomes lower.
// (3) If the new space is almost full and we can affort a scavenge or if the
// next scavenge will very likely take long, then a scavenge is performed.
// (4) If sweeping is in progress and we received a large enough idle time
// (4) If there is currently no MarkCompact idle round going on, we start a
// new idle round if enough garbage was created. Otherwise we do not perform
// garbage collection to keep system utilization low.
// (5) If incremental marking is done, we perform a full garbage collection
// if we are allowed to still do full garbage collections during this idle
// round or if we are not allowed to start incremental marking. Otherwise we
// do not perform garbage collection to keep system utilization low.
// (6) If sweeping is in progress and we received a large enough idle time
// request, we finalize sweeping here.
// (5) If incremental marking is in progress, we perform a marking step. Note,
// (7) If incremental marking is in progress, we perform a marking step. Note,
// that this currently may trigger a full garbage collection.
GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
HeapState heap_state) {
GCIdleTimeAction GCIdleTimeHandler::Action(double idle_time_in_ms,
const HeapState& heap_state,
bool reduce_memory) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
if (heap_state.incremental_marking_stopped) {
if (ShouldDoContextDisposalMarkCompact(
heap_state.contexts_disposed,
heap_state.contexts_disposal_rate)) {
return GCIdleTimeAction::FullGC();
return GCIdleTimeAction::FullGC(false);
}
}
return GCIdleTimeAction::Nothing();
......@@ -243,6 +307,14 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
return GCIdleTimeAction::Scavenge();
}
if (heap_state.incremental_marking_stopped && reduce_memory) {
if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
heap_state.size_of_objects,
heap_state.mark_compact_speed_in_bytes_per_ms)) {
return GCIdleTimeAction::FullGC(reduce_memory);
}
}
if (heap_state.sweeping_in_progress) {
if (heap_state.sweeping_completed) {
return GCIdleTimeAction::FinalizeSweeping();
......@@ -251,16 +323,95 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
}
}
if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
return GCIdleTimeAction::Done();
if (!FLAG_incremental_marking ||
(heap_state.incremental_marking_stopped &&
!heap_state.can_start_incremental_marking && !reduce_memory)) {
return NothingOrDone();
}
size_t step_size = EstimateMarkingStepSize(
static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
heap_state.incremental_marking_speed_in_bytes_per_ms);
return GCIdleTimeAction::IncrementalMarking(step_size);
return GCIdleTimeAction::IncrementalMarking(step_size, reduce_memory);
}
void GCIdleTimeHandler::UpdateCounters(double idle_time_in_ms) {
if (mode_ == kReduceLatency) {
int gcs = scavenges_ + mark_compacts_;
if (gcs > 0) {
// There was a GC since the last notification.
long_idle_notifications_ = 0;
background_idle_notifications_ = 0;
}
idle_mark_compacts_ = 0;
mark_compacts_ = 0;
scavenges_ = 0;
if (idle_time_in_ms >= kMinBackgroundIdleTime) {
background_idle_notifications_++;
} else if (idle_time_in_ms >= kMinLongIdleTime) {
long_idle_notifications_++;
}
}
}
void GCIdleTimeHandler::ResetCounters() {
long_idle_notifications_ = 0;
background_idle_notifications_ = 0;
idle_mark_compacts_ = 0;
mark_compacts_ = 0;
scavenges_ = 0;
idle_times_which_made_no_progress_per_mode_ = 0;
}
bool GCIdleTimeHandler::IsMutatorActive(int contexts_disposed,
int mark_compacts) {
return contexts_disposed > 0 ||
mark_compacts >= kMarkCompactsBeforeMutatorIsActive;
}
bool GCIdleTimeHandler::IsMutatorIdle(int long_idle_notifications,
int background_idle_notifications,
int mutator_gcs) {
return mutator_gcs == 0 &&
(long_idle_notifications >=
kLongIdleNotificationsBeforeMutatorIsIdle ||
background_idle_notifications >=
kBackgroundIdleNotificationsBeforeMutatorIsIdle);
}
GCIdleTimeHandler::Mode GCIdleTimeHandler::NextMode(
const HeapState& heap_state) {
DCHECK(mark_compacts_ >= idle_mark_compacts_);
int mutator_gcs = scavenges_ + mark_compacts_ - idle_mark_compacts_;
switch (mode_) {
case kDone:
DCHECK(idle_mark_compacts_ == 0);
if (IsMutatorActive(heap_state.contexts_disposed, mark_compacts_)) {
return kReduceLatency;
}
break;
case kReduceLatency:
if (IsMutatorIdle(long_idle_notifications_,
background_idle_notifications_, mutator_gcs)) {
return kReduceMemory;
}
break;
case kReduceMemory:
if (idle_mark_compacts_ >= kMaxIdleMarkCompacts ||
(idle_mark_compacts_ > 0 && !next_gc_likely_to_collect_more_)) {
return kDone;
}
if (mutator_gcs > idle_mark_compacts_) {
return kReduceLatency;
}
break;
}
return mode_;
}
}
}
......@@ -27,6 +27,7 @@ class GCIdleTimeAction {
result.type = DONE;
result.parameter = 0;
result.additional_work = false;
result.reduce_memory = false;
return result;
}
......@@ -35,14 +36,17 @@ class GCIdleTimeAction {
result.type = DO_NOTHING;
result.parameter = 0;
result.additional_work = false;
result.reduce_memory = false;
return result;
}
static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
static GCIdleTimeAction IncrementalMarking(intptr_t step_size,
bool reduce_memory) {
GCIdleTimeAction result;
result.type = DO_INCREMENTAL_MARKING;
result.parameter = step_size;
result.additional_work = false;
result.reduce_memory = reduce_memory;
return result;
}
......@@ -51,14 +55,18 @@ class GCIdleTimeAction {
result.type = DO_SCAVENGE;
result.parameter = 0;
result.additional_work = false;
// TODO(ulan): add reduce_memory argument and shrink new space size if
// reduce_memory = true.
result.reduce_memory = false;
return result;
}
static GCIdleTimeAction FullGC() {
static GCIdleTimeAction FullGC(bool reduce_memory) {
GCIdleTimeAction result;
result.type = DO_FULL_GC;
result.parameter = 0;
result.additional_work = false;
result.reduce_memory = reduce_memory;
return result;
}
......@@ -67,6 +75,7 @@ class GCIdleTimeAction {
result.type = DO_FINALIZE_SWEEPING;
result.parameter = 0;
result.additional_work = false;
result.reduce_memory = false;
return result;
}
......@@ -75,6 +84,7 @@ class GCIdleTimeAction {
GCIdleTimeActionType type;
intptr_t parameter;
bool additional_work;
bool reduce_memory;
};
......@@ -118,8 +128,6 @@ class GCIdleTimeHandler {
// The maximum idle time when frames are rendered is 16.66ms.
static const size_t kMaxFrameRenderingIdleTime = 17;
static const int kMinBackgroundIdleTime = 900;
// We conservatively assume that in the next kTimeUntilNextIdleEvent ms
// no idle notification happens.
static const size_t kTimeUntilNextIdleEvent = 100;
......@@ -139,10 +147,28 @@ class GCIdleTimeHandler {
static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
// The number of idle MarkCompact GCs to perform before transitioning to
// the kDone mode.
static const int kMaxIdleMarkCompacts = 3;
// The number of mutator MarkCompact GCs before transitioning to the
// kReduceLatency mode.
static const int kMarkCompactsBeforeMutatorIsActive = 1;
// Mutator is considered idle if
// 1) there are N idle notification with time >= kMinBackgroundIdleTime,
// 2) or there are M idle notifications with time >= kMinLongIdleTime
// without any mutator GC in between.
// Where N = kBackgroundIdleNotificationsBeforeMutatorIsIdle,
// M = kLongIdleNotificationsBeforeMutatorIsIdle
static const int kMinLongIdleTime = kMaxFrameRenderingIdleTime + 1;
static const int kMinBackgroundIdleTime = 900;
static const int kBackgroundIdleNotificationsBeforeMutatorIsIdle = 2;
static const int kLongIdleNotificationsBeforeMutatorIsIdle = 50;
// Number of times we will return a Nothing action in the current mode
// despite having idle time available before we returning a Done action to
// ensure we don't keep scheduling idle tasks and making no progress.
static const int kMaxNoProgressIdleTimes = 10;
static const int kMaxNoProgressIdleTimesPerMode = 10;
class HeapState {
public:
......@@ -152,6 +178,7 @@ class GCIdleTimeHandler {
double contexts_disposal_rate;
size_t size_of_objects;
bool incremental_marking_stopped;
bool can_start_incremental_marking;
bool sweeping_in_progress;
bool sweeping_completed;
bool has_low_allocation_rate;
......@@ -164,11 +191,26 @@ class GCIdleTimeHandler {
size_t new_space_allocation_throughput_in_bytes_per_ms;
};
GCIdleTimeHandler() : idle_times_which_made_no_progress_(0) {}
GCIdleTimeHandler()
: idle_mark_compacts_(0),
mark_compacts_(0),
scavenges_(0),
long_idle_notifications_(0),
background_idle_notifications_(0),
idle_times_which_made_no_progress_per_mode_(0),
next_gc_likely_to_collect_more_(false),
mode_(kReduceLatency) {}
GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
void NotifyIdleMarkCompact() { ++idle_mark_compacts_; }
void NotifyMarkCompact(bool next_gc_likely_to_collect_more) {
next_gc_likely_to_collect_more_ = next_gc_likely_to_collect_more;
++mark_compacts_;
}
void NotifyScavenge() { ++scavenges_; }
static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
size_t marking_speed_in_bytes_per_ms);
......@@ -197,11 +239,36 @@ class GCIdleTimeHandler {
size_t scavenger_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms);
enum Mode { kReduceLatency, kReduceMemory, kDone };
Mode mode() { return mode_; }
private:
bool IsMutatorActive(int contexts_disposed, int gcs);
bool IsMutatorIdle(int long_idle_notifications,
int background_idle_notifications, int gcs);
void UpdateCounters(double idle_time_in_ms);
void ResetCounters();
Mode NextMode(const HeapState& heap_state);
GCIdleTimeAction Action(double idle_time_in_ms, const HeapState& heap_state,
bool reduce_memory);
GCIdleTimeAction NothingOrDone();
// Idle notifications with no progress.
int idle_times_which_made_no_progress_;
int idle_mark_compacts_;
int mark_compacts_;
int scavenges_;
// The number of long idle notifications with no GC happening
// between the notifications.
int long_idle_notifications_;
// The number of background idle notifications with no GC happening
// between the notifications.
int background_idle_notifications_;
// Idle notifications with no progress in the current mode.
int idle_times_which_made_no_progress_per_mode_;
bool next_gc_likely_to_collect_more_;
Mode mode_;
DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
};
......
......@@ -20,7 +20,6 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/store-buffer.h"
......@@ -108,6 +107,8 @@ Heap::Heap()
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
idle_old_generation_allocation_limit_(
kMinimumOldGenerationAllocationLimit),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
......@@ -143,7 +144,6 @@ Heap::Heap()
store_buffer_(this),
marking_(this),
incremental_marking_(this),
memory_reducer_(this),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
......@@ -927,11 +927,6 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
bool next_gc_likely_to_collect_more = false;
intptr_t committed_memory_before;
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
}
{
tracer()->Start(collector, gc_reason, collector_reason);
......@@ -953,20 +948,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
if (collector == MARK_COMPACTOR) {
intptr_t committed_memory_after = CommittedOldGenerationMemory();
intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
// Trigger one more GC if
// - this GC decreased committed memory,
// - there is high fragmentation,
// - there are live detached contexts.
event.next_gc_likely_to_collect_more =
(committed_memory_before - committed_memory_after) > MB ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
memory_reducer_.NotifyMarkCompact(event);
gc_idle_time_handler_.NotifyMarkCompact(next_gc_likely_to_collect_more);
} else {
gc_idle_time_handler_.NotifyScavenge();
}
tracer()->Stop(collector);
......@@ -1001,20 +985,10 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
AgeInlineCaches();
set_retained_maps(ArrayList::cast(empty_fixed_array()));
tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
MemoryReducer::Event event;
event.type = MemoryReducer::kContextDisposed;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_.NotifyContextDisposed(event);
return ++contexts_disposed_;
}
void Heap::StartIdleIncrementalMarking() {
gc_idle_time_handler_.ResetNoProgressCounter();
incremental_marking()->Start(kReduceMemoryFootprintMask);
}
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
if (len == 0) return;
......@@ -4785,21 +4759,6 @@ bool Heap::HasLowAllocationRate() {
}
bool Heap::HasHighFragmentation() {
intptr_t used = PromotedSpaceSizeOfObjects();
intptr_t committed = CommittedOldGenerationMemory();
return HasHighFragmentation(used, committed);
}
bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
const intptr_t kSlack = 16 * MB;
// Fragmentation is high if committed > 2 * used + kSlack.
// Rewrite the exression to avoid overflow.
return committed - used > used + kSlack;
}
void Heap::ReduceNewSpaceSize() {
if (!FLAG_predictable && HasLowAllocationRate()) {
new_space_.Shrink();
......@@ -4826,6 +4785,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
gc_idle_time_handler_.NotifyIdleMarkCompact();
return true;
}
return false;
......@@ -4856,6 +4816,15 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
heap_state.new_space_capacity = new_space_.Capacity();
heap_state.new_space_allocation_throughput_in_bytes_per_ms =
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
heap_state.has_low_allocation_rate = HasLowAllocationRate();
intptr_t limit = old_generation_allocation_limit_;
if (heap_state.has_low_allocation_rate) {
limit = idle_old_generation_allocation_limit_;
}
heap_state.can_start_incremental_marking =
incremental_marking()->CanBeActivated() &&
HeapIsFullEnoughToStartIncrementalMarking(limit) &&
!mark_compact_collector()->sweeping_in_progress();
return heap_state;
}
......@@ -4869,7 +4838,10 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
result = true;
break;
case DO_INCREMENTAL_MARKING: {
DCHECK(!incremental_marking()->IsStopped());
if (incremental_marking()->IsStopped()) {
incremental_marking()->Start(
action.reduce_memory ? kReduceMemoryFootprintMask : kNoGCFlags);
}
double remaining_idle_time_in_ms = 0.0;
do {
incremental_marking()->Step(
......@@ -4890,9 +4862,17 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
break;
}
case DO_FULL_GC: {
DCHECK(contexts_disposed_ > 0);
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
if (action.reduce_memory) {
isolate_->compilation_cache()->Clear();
}
if (contexts_disposed_) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
} else {
CollectAllGarbage(kReduceMemoryFootprintMask,
"idle notification: finalize idle round");
}
gc_idle_time_handler_.NotifyIdleMarkCompact();
break;
}
case DO_SCAVENGE:
......@@ -5633,14 +5613,23 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
factor = kMinHeapGrowingFactor;
}
// TODO(hpayer): Investigate if idle_old_generation_allocation_limit_ is still
// needed after taking the allocation rate for the old generation limit into
// account.
double idle_factor = Min(factor, kMaxHeapGrowingFactorIdle);
old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
idle_old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
"d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB,
factor);
PrintIsolate(
isolate_,
"Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX
"d KB (%.1f), new idle limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB, factor,
idle_old_generation_allocation_limit_ / KB, idle_factor);
}
}
......
......@@ -16,7 +16,6 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
......@@ -832,10 +831,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
void StartIdleIncrementalMarking();
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
......@@ -1622,10 +1617,6 @@ class Heap {
// An ArrayBuffer moved from new space to old space.
void PromoteArrayBuffer(Object* buffer);
bool HasLowAllocationRate();
bool HasHighFragmentation();
bool HasHighFragmentation(intptr_t used, intptr_t committed);
protected:
// Methods made available to tests.
......@@ -1786,6 +1777,10 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
// The allocation limit when there is >16.66ms idle time in the idle time
// handler.
intptr_t idle_old_generation_allocation_limit_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
......@@ -2262,6 +2257,7 @@ class Heap {
bool HasLowYoungGenerationAllocationRate();
bool HasLowOldGenerationAllocationRate();
bool HasLowAllocationRate();
void ReduceNewSpaceSize();
......@@ -2328,8 +2324,6 @@ class Heap {
GCIdleTimeHandler gc_idle_time_handler_;
MemoryReducer memory_reducer_;
// These two counters are monotomically increasing and never reset.
size_t full_codegen_bytes_generated_;
size_t crankshaft_codegen_bytes_generated_;
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/memory-reducer.h"
#include "src/flags.h"
#include "src/heap/heap.h"
#include "src/utils.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
const int MemoryReducer::kLongDelayMs = 5000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kMaxNumberOfGCs = 3;
void MemoryReducer::TimerTask::Run() {
Heap* heap = memory_reducer_->heap();
Event event;
event.type = kTimer;
event.time_ms = heap->MonotonicallyIncreasingTimeInMs();
event.low_allocation_rate = heap->HasLowAllocationRate();
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
heap->incremental_marking()->CanBeActivated();
memory_reducer_->NotifyTimer(event);
}
// Handles a kTimer event posted by TimerTask. Advances the state machine;
// on a kWait -> kRun transition it starts an idle incremental-marking GC,
// and while staying in kWait it re-arms the timer for the next check.
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
state_ = Step(state_, event);
if (state_.action == kRun) {
// Step() only moves kWait -> kRun when event.can_start_incremental_gc is
// true, i.e. no incremental cycle is in progress, so starting one is safe.
DCHECK(heap()->incremental_marking()->IsStopped());
DCHECK(FLAG_incremental_marking);
heap()->StartIdleIncrementalMarking();
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
state_.started_gcs);
}
} else if (state_.action == kWait) {
// Re-schedule the timer.
ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
state_.next_gc_start_ms - event.time_ms);
}
}
}
void MemoryReducer::NotifyMarkCompact(const Event& event) {
DCHECK_EQ(kMarkCompact, event.type);
Action old_action = state_.action;
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
}
if (old_action == kRun) {
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
state_.started_gcs,
state_.action == kWait ? "will do more" : "done");
}
}
}
void MemoryReducer::NotifyContextDisposed(const Event& event) {
DCHECK_EQ(kContextDisposed, event.type);
Action old_action = state_.action;
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
// If we are transitioning to the WAIT state, start the timer.
ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
}
}
// For specification of this function see the comment for MemoryReducer class.
// Pure transition function of the DONE/WAIT/RUN automaton: computes the next
// state from the current state and the incoming event. It is static and has
// no side effects, which is what makes it unit-testable in isolation.
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
if (!FLAG_incremental_marking) {
// The reducer can only start incremental GCs; without incremental marking
// it stays permanently inactive.
return State(kDone, 0, 0);
}
switch (state.action) {
case kDone:
if (event.type == kTimer) {
// Stale timer task left over from an earlier WAIT phase; ignore it.
return state;
} else {
DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
// Potential garbage was created: schedule the first idle-GC check
// kLongDelayMs from now.
return State(kWait, 0, event.time_ms + kLongDelayMs);
}
case kWait:
if (event.type == kContextDisposed) {
return state;
} else if (event.type == kTimer && event.can_start_incremental_gc &&
event.low_allocation_rate) {
// Mutator looks idle; start a GC once the scheduled start time passed.
if (state.next_gc_start_ms <= event.time_ms) {
return State(kRun, state.started_gcs + 1, 0.0);
} else {
return state;
}
} else {
// Mutator GC happened, allocation rate is high, or an incremental GC is
// already running: push the next idle-GC attempt out by kLongDelayMs.
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs);
}
case kRun:
if (event.type != kMarkCompact) {
return state;
} else {
// The reducer-initiated GC finished. Schedule another round if it is
// likely to collect more (or was the first) and the budget of
// kMaxNumberOfGCs is not yet used up; otherwise go back to DONE.
if (state.started_gcs < kMaxNumberOfGCs &&
(event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs);
} else {
return State(kDone, 0, 0.0);
}
}
}
UNREACHABLE();
return State(kDone, 0, 0); // Make the compiler happy.
}
void MemoryReducer::ScheduleTimer(double delay_ms) {
DCHECK(delay_ms > 0);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
isolate, new MemoryReducer::TimerTask(this),
(delay_ms + kSlackMs) / 1000.0);
}
} // internal
} // v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_memory_reducer_H
#define V8_HEAP_memory_reducer_H
#include "include/v8-platform.h"
#include "src/base/macros.h"
namespace v8 {
namespace internal {
class Heap;
// The goal of the MemoryReducer class is to detect transition of the mutator
// from high allocation phase to low allocation phase and to collect potential
// garbage created in the high allocation phase.
//
// The class implements an automaton with the following states and transitions.
//
// States:
// - DONE
// - WAIT <started_gcs> <next_gc_start_ms>
// - RUN <started_gcs>
// The <started_gcs> is an integer in range from 0..kMaxNumberOfGCs that stores
// the number of GCs initiated by the MemoryReducer since it left the DONE
// state.
// The <next_gc_start_ms> is a double that stores the earliest time the next GC
// can be initiated by the MemoryReducer.
// The DONE state means that the MemoryReducer is not active.
// The WAIT state means that the MemoryReducer is waiting for mutator allocation
// rate to drop. The check for the allocation rate happens in the timer task
// callback.
// The RUN state means that the MemoryReducer started incremental marking and is
// waiting for it to finish. Incremental marking steps are performed as usual
// in the idle notification and in the mutator.
//
// Transitions:
// DONE -> WAIT 0 (now_ms + long_delay_ms) happens:
// - on context disposal,
// - at the end of mark-compact GC initiated by the mutator.
// This signals that there is potential garbage to be collected.
//
// WAIT n x -> WAIT n (now_ms + long_delay_ms) happens:
// - on mark-compact GC initiated by the mutator,
// - in the timer callback if the mutator allocation rate is high or
// incremental GC is in progress.
//
// WAIT n x -> RUN (n+1) happens:
// - in the timer callback if the mutator allocation rate is low
// and now_ms >= x and there is no incremental GC in progress.
// The MemoryReducer starts incremental marking on this transition.
//
// RUN n -> DONE happens:
// - at end of the incremental GC initiated by the MemoryReducer if
// (n > 1 and there is no more garbage to be collected) or
// n == kMaxNumberOfGCs.
// RUN n -> WAIT n (now_ms + short_delay_ms) happens:
// - at end of the incremental GC initiated by the MemoryReducer if
// (n == 1 or there is more garbage to be collected) and
// n < kMaxNumberOfGCs.
//
// now_ms is the current time, long_delay_ms and short_delay_ms are constants.
// See the automaton description above for the DONE/WAIT/RUN semantics.
class MemoryReducer {
public:
// Automaton states. kDone: inactive; kWait: waiting for the mutator
// allocation rate to drop; kRun: a reducer-initiated incremental GC is in
// progress.
enum Action { kDone, kWait, kRun };
// Immutable snapshot of the automaton state; produced by Step().
struct State {
State(Action action, int started_gcs, double next_gc_start_ms)
: action(action),
started_gcs(started_gcs),
next_gc_start_ms(next_gc_start_ms) {}
Action action;
// Number of GCs initiated by the reducer since it left kDone.
int started_gcs;
// Earliest time (ms) the next reducer GC may start; meaningful in kWait.
double next_gc_start_ms;
};
enum EventType {
kTimer,
kMarkCompact,
kContextDisposed,
};
// Input to Step(). Only the fields relevant to the given type are read:
// kTimer events carry low_allocation_rate/can_start_incremental_gc,
// kMarkCompact events carry next_gc_likely_to_collect_more.
struct Event {
EventType type;
double time_ms;
bool low_allocation_rate;
bool next_gc_likely_to_collect_more;
bool can_start_incremental_gc;
};
explicit MemoryReducer(Heap* heap) : heap_(heap), state_(kDone, 0, 0.0) {}
// Callbacks.
void NotifyTimer(const Event& event);
void NotifyMarkCompact(const Event& event);
// NOTE(review): declared but no definition is visible in memory-reducer.cc
// and no kScavenge event type exists — confirm this is not dead API.
void NotifyScavenge(const Event& event);
void NotifyContextDisposed(const Event& event);
// The step function that computes the next state from the current state and
// the incoming event.
static State Step(const State& state, const Event& event);
// Posts a timer task that will call NotifyTimer after the given delay.
void ScheduleTimer(double delay_ms);
static const int kLongDelayMs;
static const int kShortDelayMs;
static const int kMaxNumberOfGCs;
Heap* heap() { return heap_; }
private:
// Delayed foreground task that samples heap state and calls NotifyTimer.
class TimerTask : public v8::Task {
public:
explicit TimerTask(MemoryReducer* memory_reducer)
: memory_reducer_(memory_reducer) {}
virtual ~TimerTask() {}
private:
// v8::Task overrides.
void Run() override;
MemoryReducer* memory_reducer_;
DISALLOW_COPY_AND_ASSIGN(TimerTask);
};
Heap* heap_;
State state_;
DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
};
} // namespace internal
} // namespace v8
#endif // V8_HEAP_memory_reducer_H
......@@ -15294,7 +15294,6 @@ static void CreateGarbageInOldSpace() {
// Test that idle notification can be handled and eventually collects garbage.
TEST(TestIdleNotification) {
if (!i::FLAG_incremental_marking) return;
const intptr_t MB = 1024 * 1024;
const double IdlePauseInSeconds = 1.0;
LocalContext env;
......@@ -15305,9 +15304,6 @@ TEST(TestIdleNotification) {
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
if (i < 10 && CcTest::heap()->incremental_marking()->IsStopped()) {
CcTest::heap()->StartIdleIncrementalMarking();
}
finished = env->GetIsolate()->IdleNotificationDeadline(
(v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
......
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "src/flags.h"
#include "src/heap/memory-reducer.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
// Builds the initial idle state: kDone, no GCs started, no scheduled time.
MemoryReducer::State DoneState() {
  MemoryReducer::State idle(MemoryReducer::kDone, 0, 0.0);
  return idle;
}
// Builds a kWait state with the given GC count and scheduled start time.
MemoryReducer::State WaitState(int started_gcs, double next_gc_start_ms) {
  MemoryReducer::State waiting(MemoryReducer::kWait, started_gcs,
                               next_gc_start_ms);
  return waiting;
}
// Builds a kRun state with the given GC count and scheduled start time.
MemoryReducer::State RunState(int started_gcs, double next_gc_start_ms) {
  MemoryReducer::State running(MemoryReducer::kRun, started_gcs,
                               next_gc_start_ms);
  return running;
}
// Builds a kMarkCompact event observed at |time_ms|.
MemoryReducer::Event MarkCompactEvent(double time_ms,
                                      bool next_gc_likely_to_collect_more) {
  MemoryReducer::Event gc_event;
  gc_event.next_gc_likely_to_collect_more = next_gc_likely_to_collect_more;
  gc_event.time_ms = time_ms;
  gc_event.type = MemoryReducer::kMarkCompact;
  return gc_event;
}
// A mark-compact after which another GC would likely collect more.
MemoryReducer::Event MarkCompactEventGarbageLeft(double time_ms) {
  const bool more_garbage = true;
  return MarkCompactEvent(time_ms, more_garbage);
}
// A mark-compact after which another GC would likely find nothing to collect.
MemoryReducer::Event MarkCompactEventNoGarbageLeft(double time_ms) {
  const bool more_garbage = false;
  return MarkCompactEvent(time_ms, more_garbage);
}
// Builds a kTimer event observed at |time_ms| with the given heap conditions.
MemoryReducer::Event TimerEvent(double time_ms, bool low_allocation_rate,
                                bool can_start_incremental_gc) {
  MemoryReducer::Event timer_event;
  timer_event.type = MemoryReducer::kTimer;
  timer_event.can_start_incremental_gc = can_start_incremental_gc;
  timer_event.low_allocation_rate = low_allocation_rate;
  timer_event.time_ms = time_ms;
  return timer_event;
}
// Timer fired while the mutator is quiet and a GC could start.
MemoryReducer::Event TimerEventLowAllocationRate(double time_ms) {
  return TimerEvent(time_ms, /* low_allocation_rate */ true,
                    /* can_start_incremental_gc */ true);
}
// Timer fired while the mutator is allocating heavily.
MemoryReducer::Event TimerEventHighAllocationRate(double time_ms) {
  return TimerEvent(time_ms, /* low_allocation_rate */ false,
                    /* can_start_incremental_gc */ true);
}
// Timer fired while an incremental GC cannot be started (one is pending).
MemoryReducer::Event TimerEventPendingGC(double time_ms) {
  return TimerEvent(time_ms, /* low_allocation_rate */ true,
                    /* can_start_incremental_gc */ false);
}
// Builds a kContextDisposed event observed at |time_ms|.
MemoryReducer::Event ContextDisposedEvent(double time_ms) {
  MemoryReducer::Event dispose_event;
  dispose_event.time_ms = time_ms;
  dispose_event.type = MemoryReducer::kContextDisposed;
  return dispose_event;
}
TEST(MemoryReducer, FromDoneToDone) {
  MemoryReducer::State before(DoneState()), after(DoneState());

  // No timer event may move the machine out of kDone.
  after = MemoryReducer::Step(before, TimerEventLowAllocationRate(0));
  EXPECT_EQ(MemoryReducer::kDone, after.action);

  after = MemoryReducer::Step(before, TimerEventHighAllocationRate(0));
  EXPECT_EQ(MemoryReducer::kDone, after.action);

  after = MemoryReducer::Step(before, TimerEventPendingGC(0));
  EXPECT_EQ(MemoryReducer::kDone, after.action);
}
TEST(MemoryReducer, FromDoneToWait) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(DoneState()), after(DoneState());

  // A completed mark-compact arms a kLongDelayMs wait, garbage left or not.
  after = MemoryReducer::Step(before, MarkCompactEventGarbageLeft(0));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(0, after.started_gcs);

  after = MemoryReducer::Step(before, MarkCompactEventNoGarbageLeft(0));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(0, after.started_gcs);

  // So does disposing a context.
  after = MemoryReducer::Step(before, ContextDisposedEvent(0));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(0, after.started_gcs);
}
TEST(MemoryReducer, FromWaitToWait) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(WaitState(2, 1000.0)), after(DoneState());

  // A context disposal while waiting keeps the existing schedule.
  after = MemoryReducer::Step(before, ContextDisposedEvent(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  // A timer that fires before the scheduled start time changes nothing.
  after = MemoryReducer::Step(
      before, TimerEventLowAllocationRate(before.next_gc_start_ms - 1));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  // A high allocation rate pushes the start time out by kLongDelayMs.
  after = MemoryReducer::Step(before, TimerEventHighAllocationRate(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  // An already pending GC postpones the clean-up GC the same way.
  after = MemoryReducer::Step(before, TimerEventPendingGC(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  // An external mark-compact reschedules relative to its completion time.
  after = MemoryReducer::Step(before, MarkCompactEventGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  after = MemoryReducer::Step(before, MarkCompactEventNoGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);
}
TEST(MemoryReducer, FromWaitToRun) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(WaitState(0, 1000.0)), after(DoneState());

  // Once the scheduled time has passed and allocation is low, a GC starts
  // and the started-GC counter advances.
  after = MemoryReducer::Step(
      before, TimerEventLowAllocationRate(before.next_gc_start_ms + 1));
  EXPECT_EQ(MemoryReducer::kRun, after.action);
  EXPECT_EQ(0, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs + 1, after.started_gcs);
}
TEST(MemoryReducer, FromRunToRun) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(RunState(1, 0.0)), after(DoneState());

  // While a clean-up GC is in flight, timer and disposal events are inert.
  after = MemoryReducer::Step(before, TimerEventLowAllocationRate(2000));
  EXPECT_EQ(MemoryReducer::kRun, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  after = MemoryReducer::Step(before, TimerEventHighAllocationRate(2000));
  EXPECT_EQ(MemoryReducer::kRun, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  after = MemoryReducer::Step(before, TimerEventPendingGC(2000));
  EXPECT_EQ(MemoryReducer::kRun, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  after = MemoryReducer::Step(before, ContextDisposedEvent(2000));
  EXPECT_EQ(MemoryReducer::kRun, after.action);
  EXPECT_EQ(before.next_gc_start_ms, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);
}
TEST(MemoryReducer, FromRunToDone) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(RunState(2, 0.0)), after(DoneState());

  // After two started GCs, a mark-compact that leaves no garbage ends the
  // sequence and resets the counters.
  after = MemoryReducer::Step(before, MarkCompactEventNoGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kDone, after.action);
  EXPECT_EQ(0, after.next_gc_start_ms);
  EXPECT_EQ(0, after.started_gcs);

  // Exhausting the GC budget ends the sequence even with garbage left.
  before.started_gcs = MemoryReducer::kMaxNumberOfGCs;
  after = MemoryReducer::Step(before, MarkCompactEventGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kDone, after.action);
  EXPECT_EQ(0, after.next_gc_start_ms);
  EXPECT_EQ(0, after.started_gcs);
}
TEST(MemoryReducer, FromRunToWait) {
  if (!FLAG_incremental_marking) return;
  MemoryReducer::State before(RunState(2, 0.0)), after(DoneState());

  // Garbage left and budget remaining: the next GC is scheduled after the
  // short delay, with the counter unchanged.
  after = MemoryReducer::Step(before, MarkCompactEventGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);

  // With only one GC started so far, even a "no garbage left" result keeps
  // the sequence going.
  before.started_gcs = 1;
  after = MemoryReducer::Step(before, MarkCompactEventNoGarbageLeft(2000));
  EXPECT_EQ(MemoryReducer::kWait, after.action);
  EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, after.next_gc_start_ms);
  EXPECT_EQ(before.started_gcs, after.started_gcs);
}
} // namespace internal
} // namespace v8
......@@ -91,7 +91,6 @@
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'run-all-unittests.cc',
'test-utils.h',
......
......@@ -675,8 +675,6 @@
'../../src/heap-snapshot-generator-inl.h',
'../../src/heap-snapshot-generator.cc',
'../../src/heap-snapshot-generator.h',
'../../src/heap/memory-reducer.cc',
'../../src/heap/memory-reducer.h',
'../../src/heap/gc-idle-time-handler.cc',
'../../src/heap/gc-idle-time-handler.h',
'../../src/heap/gc-tracer.cc',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment