Commit bb9b74e7 authored by ulan, committed by Commit bot

Add memory pressure notification API

Based on CL 1777883002.

BUG=chromium:590975
LOG=NO

Review URL: https://codereview.chromium.org/1813963002

Cr-Commit-Position: refs/heads/master@{#35053}
parent 8b218d64
@@ -5350,6 +5350,15 @@ class V8_EXPORT PersistentHandleVisitor {  // NOLINT
uint16_t class_id) {}
};
/**
* Memory pressure level for the MemoryPressureNotification.
* kNone hints V8 that there is no memory pressure.
* kModerate hints V8 to speed up incremental garbage collection at the cost of
* higher latency due to garbage collection pauses.
* kCritical hints V8 to free memory as soon as possible. Garbage collection
* pauses at this level will be large.
*/
enum class MemoryPressureLevel { kNone, kModerate, kCritical };
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
@@ -5590,6 +5599,14 @@ class V8_EXPORT Isolate {
void SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
* It is allowed to call this function from another thread while
* the isolate is executing long running JavaScript code.
*/
void MemoryPressureNotification(MemoryPressureLevel level);
/**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
......
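The public API above is all an embedder needs: pick a MemoryPressureLevel and pass it to Isolate::MemoryPressureNotification, from any thread. Below is a minimal embedder-side sketch; the OnSystemMemoryWarning/OnSystemMemoryOk hooks are hypothetical glue code, not part of this CL.

#include <v8.h>

// Hypothetical embedder glue (names not from this CL): forward a platform
// low-memory signal to V8. The API may be called from a thread other than
// the one currently running JavaScript in the isolate.
void OnSystemMemoryWarning(v8::Isolate* isolate, bool critical) {
  // kCritical: free memory as soon as possible, accepting long GC pauses.
  // kModerate: speed up incremental marking at some latency cost.
  isolate->MemoryPressureNotification(
      critical ? v8::MemoryPressureLevel::kCritical
               : v8::MemoryPressureLevel::kModerate);
}

// When the pressure subsides, return V8 to its normal heuristics.
void OnSystemMemoryOk(v8::Isolate* isolate) {
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kNone);
}

Because the notification may arrive from a thread that does not hold the isolate lock, V8 routes that case through a GC interrupt and a posted task rather than collecting inline (see the heap.cc hunks below).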
@@ -7657,6 +7657,11 @@ void Isolate::IsolateInBackgroundNotification() {
return isolate->heap()->SetOptimizeForMemoryUsage();
}
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->MemoryPressureNotification(level,
Locker::IsLocked(this));
}
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
......
@@ -68,7 +68,6 @@ class IdleScavengeObserver : public AllocationObserver {
Heap& heap_;
};
Heap::Heap()
: amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
@@ -92,6 +91,7 @@ Heap::Heap()
survived_since_last_expansion_(0),
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
@@ -790,12 +790,19 @@ class GCCallbacksScope {
 void Heap::HandleGCRequest() {
-  if (incremental_marking()->request_type() ==
-      IncrementalMarking::COMPLETE_MARKING) {
+  if (HighMemoryPressure()) {
+    incremental_marking()->reset_request_type();
+    CheckMemoryPressure();
+  } else if (incremental_marking()->request_type() ==
+             IncrementalMarking::COMPLETE_MARKING) {
+    incremental_marking()->reset_request_type();
     CollectAllGarbage(current_gc_flags_, "GC interrupt",
                       current_gc_callback_flags_);
-  } else if (incremental_marking()->IsMarking() &&
+  } else if (incremental_marking()->request_type() ==
+                 IncrementalMarking::FINALIZATION &&
+             incremental_marking()->IsMarking() &&
              !incremental_marking()->finalize_marking_completed()) {
+    incremental_marking()->reset_request_type();
     FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
   }
 }
@@ -1036,6 +1043,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
}
tracer()->Stop(collector);
@@ -1455,7 +1463,6 @@ void Heap::MarkCompactEpilogue() {
incremental_marking()->Epilogue();
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
// We finished a marking cycle. We can uncommit the marking deque until
@@ -4424,6 +4431,59 @@ bool Heap::RecentIdleNotificationHappened() {
MonotonicallyIncreasingTimeInMs();
}
class MemoryPressureInterruptTask : public CancelableTask {
public:
explicit MemoryPressureInterruptTask(Heap* heap)
: CancelableTask(heap->isolate()), heap_(heap) {}
virtual ~MemoryPressureInterruptTask() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override { heap_->CheckMemoryPressure(); }
Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
};
void Heap::CheckMemoryPressure() {
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure("memory pressure");
} else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
StartIdleIncrementalMarking();
}
}
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
void Heap::CollectGarbageOnMemoryPressure(const char* source) {
CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
source);
}
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
MemoryPressureLevel previous = memory_pressure_level_.Value();
memory_pressure_level_.SetValue(level);
if ((previous != MemoryPressureLevel::kCritical &&
level == MemoryPressureLevel::kCritical) ||
(previous == MemoryPressureLevel::kNone &&
level == MemoryPressureLevel::kModerate)) {
if (is_isolate_locked) {
CheckMemoryPressure();
} else {
ExecutionAccess access(isolate());
isolate()->stack_guard()->RequestGC();
V8::GetCurrentPlatform()->CallOnForegroundThread(
reinterpret_cast<v8::Isolate*>(isolate()),
new MemoryPressureInterruptTask(this));
}
}
}
#ifdef DEBUG
......
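For readers skimming the heap.cc hunks above, here is a simplified, standalone model of the Heap::MemoryPressureNotification / CheckMemoryPressure flow. This is not V8 code: the cross-thread path (requesting a GC interrupt via the stack guard and posting a MemoryPressureInterruptTask) and the MemoryReducer notification are omitted, and ToyHeap is an invented name used purely for illustration.

#include <atomic>
#include <iostream>

// Toy types only; names mirror the CL but this is not V8 code.
enum class MemoryPressureLevel { kNone, kModerate, kCritical };

class ToyHeap {
 public:
  // Another thread may call this, so the level lives in an atomic.
  void MemoryPressureNotification(MemoryPressureLevel level) {
    MemoryPressureLevel previous = level_.exchange(level);
    // Only an escalation triggers work: kNone/kModerate -> kCritical, or
    // kNone -> kModerate. Repeated notifications at the same level do
    // nothing until a mark-compact GC resets the level back to kNone.
    bool to_critical = previous != MemoryPressureLevel::kCritical &&
                       level == MemoryPressureLevel::kCritical;
    bool to_moderate = previous == MemoryPressureLevel::kNone &&
                       level == MemoryPressureLevel::kModerate;
    if (to_critical || to_moderate) CheckMemoryPressure();
  }

  void CheckMemoryPressure() {
    switch (level_.load()) {
      case MemoryPressureLevel::kCritical:
        // Real code: CollectAllGarbage with kReduceMemoryFootprintMask |
        // kAbortIncrementalMarkingMask.
        std::cout << "critical: full GC, reduce memory footprint\n";
        break;
      case MemoryPressureLevel::kModerate:
        // Real code: StartIdleIncrementalMarking() if marking is stopped.
        std::cout << "moderate: start incremental marking\n";
        break;
      case MemoryPressureLevel::kNone:
        break;
    }
  }

 private:
  std::atomic<MemoryPressureLevel> level_{MemoryPressureLevel::kNone};
};

int main() {
  ToyHeap heap;
  heap.MemoryPressureNotification(MemoryPressureLevel::kModerate);
  heap.MemoryPressureNotification(MemoryPressureLevel::kCritical);
}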
@@ -10,6 +10,7 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/atomic-utils.h"
@@ -23,6 +24,8 @@
namespace v8 {
namespace internal {
using v8::MemoryPressureLevel;
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
V(Map, byte_array_map, ByteArrayMap) \
@@ -736,6 +739,10 @@ class Heap {
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);
void MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked);
void CheckMemoryPressure();
double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -750,6 +757,8 @@ class Heap {
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
if (HighMemoryPressure()) return true;
return false;
}
@@ -833,7 +842,12 @@ class Heap {
   void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
   void SetOptimizeForMemoryUsage();
-  bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+  bool ShouldOptimizeForMemoryUsage() {
+    return optimize_for_memory_usage_ || HighMemoryPressure();
+  }
+  bool HighMemoryPressure() {
+    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
+  }
   // ===========================================================================
   // Initialization. ===========================================================
@@ -1630,6 +1644,8 @@ class Heap {
void CompactRetainedMaps(ArrayList* retained_maps);
void CollectGarbageOnMemoryPressure(const char* source);
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1996,6 +2012,10 @@ class Heap {
// count, as scopes can be acquired from multiple tasks (read: threads).
AtomicNumber<size_t> always_allocate_scope_count_;
// Stores the memory pressure level that is set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
AtomicValue<MemoryPressureLevel> memory_pressure_level_;
// For keeping track of context disposals.
int contexts_disposed_;
......
@@ -14,6 +14,8 @@
namespace v8 {
namespace internal {
const double IncrementalMarkingJob::kLongDelayInSeconds = 5;
const double IncrementalMarkingJob::kShortDelayInSeconds = 0.5;
void IncrementalMarkingJob::Start(Heap* heap) {
DCHECK(!heap->incremental_marking()->IsStopped());
@@ -58,8 +60,10 @@ void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
     delayed_task_pending_ = true;
     made_progress_since_last_delayed_task_ = false;
     auto task = new DelayedTask(heap->isolate(), this);
+    double delay =
+        heap->HighMemoryPressure() ? kShortDelayInSeconds : kLongDelayInSeconds;
     V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
-                                                            kDelayInSeconds);
+                                                            delay);
   }
 }
......
@@ -49,7 +49,8 @@ class IncrementalMarkingJob {
   };
   // Delay of the delayed task.
-  static const int kDelayInSeconds = 5;
+  static const double kLongDelayInSeconds;
+  static const double kShortDelayInSeconds;
   IncrementalMarkingJob()
       : idle_task_pending_(false),
......
@@ -45,7 +45,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       black_allocation_(false),
       finalize_marking_completed_(false),
       incremental_marking_finalization_rounds_(0),
-      request_type_(COMPLETE_MARKING) {}
+      request_type_(NONE) {}
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
......
@@ -29,7 +29,7 @@ class IncrementalMarking {
   enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
-  enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
+  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
   struct StepActions {
     StepActions(CompletionAction complete_action_,
@@ -80,6 +80,8 @@ class IncrementalMarking {
GCRequestType request_type() const { return request_type_; }
void reset_request_type() { request_type_ = NONE; }
bool CanBeActivated();
bool ShouldActivateEvenWithoutIdleNotification();
......
@@ -24856,3 +24856,71 @@ TEST(Proxy) {
CHECK(proxy->GetTarget()->SameValue(target));
CHECK(proxy->GetHandler()->IsNull());
}
WeakCallCounterAndPersistent<Value>* CreateGarbageWithWeakCallCounter(
v8::Isolate* isolate, WeakCallCounter* counter) {
v8::Locker locker(isolate);
LocalContext env;
HandleScope scope(isolate);
WeakCallCounterAndPersistent<Value>* val =
new WeakCallCounterAndPersistent<Value>(counter);
val->handle.Reset(isolate, Object::New(isolate));
val->handle.SetWeak(val, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
return val;
}
class MemoryPressureThread : public v8::base::Thread {
public:
explicit MemoryPressureThread(v8::Isolate* isolate,
v8::MemoryPressureLevel level)
: Thread(Options("MemoryPressureThread")),
isolate_(isolate),
level_(level) {}
virtual void Run() { isolate_->MemoryPressureNotification(level_); }
private:
v8::Isolate* isolate_;
v8::MemoryPressureLevel level_;
};
TEST(MemoryPressure) {
v8::Isolate* isolate = CcTest::isolate();
WeakCallCounter counter(1234);
// Check that critical memory pressure notification sets GC interrupt.
auto garbage = CreateGarbageWithWeakCallCounter(isolate, &counter);
CHECK(!v8::Locker::IsLocked(isolate));
{
v8::Locker locker(isolate);
v8::HandleScope scope(isolate);
LocalContext env;
MemoryPressureThread memory_pressure_thread(
isolate, v8::MemoryPressureLevel::kCritical);
memory_pressure_thread.Start();
memory_pressure_thread.Join();
// This should trigger GC.
CHECK_EQ(0, counter.NumberOfWeakCalls());
CompileRun("(function noop() { return 0; })()");
CHECK_EQ(1, counter.NumberOfWeakCalls());
}
delete garbage;
// Check that critical memory pressure notification triggers GC.
garbage = CreateGarbageWithWeakCallCounter(isolate, &counter);
{
v8::Locker locker(isolate);
// If isolate is locked, memory pressure notification should trigger GC.
CHECK_EQ(1, counter.NumberOfWeakCalls());
isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
CHECK_EQ(2, counter.NumberOfWeakCalls());
}
delete garbage;
// Check that moderate memory pressure notification sets GC into memory
// optimizing mode.
isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kModerate);
CHECK(CcTest::i_isolate()->heap()->ShouldOptimizeForMemoryUsage());
// Check that disabling memory pressure returns GC into normal mode.
isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kNone);
CHECK(!CcTest::i_isolate()->heap()->ShouldOptimizeForMemoryUsage());
}