// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-reducer.h"

#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/init/v8.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

17
const int MemoryReducer::kLongDelayMs = 8000;
18
const int MemoryReducer::kShortDelayMs = 500;
19
const int MemoryReducer::kWatchdogDelayMs = 100000;
20
const int MemoryReducer::kMaxNumberOfGCs = 3;
21 22
const double MemoryReducer::kCommittedMemoryFactor = 1.1;
const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
23

24 25 26 27 28 29 30 31
MemoryReducer::MemoryReducer(Heap* heap)
    : heap_(heap),
      taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner(
          reinterpret_cast<v8::Isolate*>(heap->isolate()))),
      state_(kDone, 0, 0.0, 0.0, 0),
      js_calls_counter_(0),
      js_calls_sample_time_ms_(0.0) {}

32 33 34
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}
35

36 37

void MemoryReducer::TimerTask::RunInternal() {
38 39
  Heap* heap = memory_reducer_->heap();
  Event event;
40 41
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
42 43
                                   heap->OldGenerationAllocationCounter(),
                                   heap->EmbedderAllocationCounter());
44 45 46
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
47
    heap->isolate()->PrintWithTimestamp(
48
        "Memory reducer: %s, %s\n",
49 50
        low_allocation_rate ? "low alloc" : "high alloc",
        optimize_for_memory ? "background" : "foreground");
51
  }
52
  event.type = kTimer;
53
  event.time_ms = time_ms;
54 55 56
  // The memory reducer will start incremental markig if
  // 1) mutator is likely idle: js call rate is low and allocation rate is low.
  // 2) mutator is in background: optimize for memory flag is set.
57 58
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
59 60
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
61
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
62
  event.committed_memory = heap->CommittedOldGenerationMemory();
63 64 65 66 67 68 69 70 71 72 73 74
  memory_reducer_->NotifyTimer(event);
}


void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
75 76
      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
                                            state_.started_gcs);
77
    }
78
    heap()->StartIdleIncrementalMarking(
79 80
        GarbageCollectionReason::kMemoryReducer,
        kGCCallbackFlagCollectAllExternalMemory);
81
  } else if (state_.action == kWait) {
82 83 84 85 86 87 88 89
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
90
      heap()->incremental_marking()->AdvanceWithDeadline(
91
          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
92
          StepOrigin::kTask);
93
      heap()->FinalizeIncrementalMarkingIfComplete(
94
          GarbageCollectionReason::kFinalizeMarkingViaTask);
95
    }
96
    // Re-schedule the timer.
97
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
98
    if (FLAG_trace_gc_verbose) {
99 100 101
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: waiting for %.f ms\n",
          state_.next_gc_start_ms - event.time_ms);
102 103 104 105 106 107 108 109 110 111 112
    }
  }
}


void MemoryReducer::NotifyMarkCompact(const Event& event) {
  DCHECK_EQ(kMarkCompact, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
113
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
114 115 116
  }
  if (old_action == kRun) {
    if (FLAG_trace_gc_verbose) {
117 118 119
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
          state_.action == kWait ? "will do more" : "done");
120 121 122 123
    }
  }
}

124 125
void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
  DCHECK_EQ(kPossibleGarbage, event.type);
126 127 128 129
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
130
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
131 132 133 134
  }
}


135 136 137 138 139 140
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
  return state.last_gc_time_ms != 0 &&
         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
}


141 142 143
// For specification of this function see the comment for MemoryReducer class.
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
ulan's avatar
ulan committed
144
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
145
    return State(kDone, 0, 0, state.last_gc_time_ms, 0);
146 147 148
  }
  switch (state.action) {
    case kDone:
149
      if (event.type == kTimer) {
150
        return state;
151 152 153 154 155 156 157 158 159 160 161 162
      } else if (event.type == kMarkCompact) {
        if (event.committed_memory <
            Max(static_cast<size_t>(state.committed_memory_at_last_run *
                                    kCommittedMemoryFactor),
                state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
          return state;
        } else {
          return State(kWait, 0, event.time_ms + kLongDelayMs,
                       event.type == kMarkCompact ? event.time_ms
                                                  : state.last_gc_time_ms,
                       0);
        }
163
      } else {
164
        DCHECK_EQ(kPossibleGarbage, event.type);
165 166
        return State(
            kWait, 0, event.time_ms + kLongDelayMs,
167 168
            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
            0);
169 170
      }
    case kWait:
171
      switch (event.type) {
172
        case kPossibleGarbage:
173
          return state;
174 175
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
176 177
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
                         event.committed_memory);
178
          } else if (event.can_start_incremental_gc &&
179 180
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
181
            if (state.next_gc_start_ms <= event.time_ms) {
182
              return State(kRun, state.started_gcs + 1, 0.0,
183
                           state.last_gc_time_ms, 0);
184 185 186 187
            } else {
              return state;
            }
          } else {
188
            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
189
                         state.last_gc_time_ms, 0);
190 191
          }
        case kMarkCompact:
192
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
193
                       event.time_ms, 0);
194 195 196 197 198 199 200
      }
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
201
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
202
                       event.time_ms, 0);
203
        } else {
204 205
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
                       event.committed_memory);
206 207 208 209 210 211
        }
      }
  }
  UNREACHABLE();
}

212
void MemoryReducer::ScheduleTimer(double delay_ms) {
213
  DCHECK_LT(0, delay_ms);
214
  if (heap()->IsTearingDown()) return;
215 216
  // Leave some room for precision error in task scheduler.
  const double kSlackMs = 100;
217 218 219
  taskrunner_->PostDelayedTask(
      base::make_unique<MemoryReducer::TimerTask>(this),
      (delay_ms + kSlackMs) / 1000.0);
220 221
}

222
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
223

}  // namespace internal
}  // namespace v8