Commit e27e6fd6 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Implement per-context marking worklist draining

This changes the marking worklist draining for the main thread
marker and the concurrent marker to use the following algorithm in
per-context mode:
1) Pop an object from the marking worklist.
2) Try to infer the native context that owns the object.
   This is done using a new NativeContextInferrer class.
3) If the inference is successful, then change the active marking
   worklist to the worklist of the inferred native context.
4) Otherwise, keep the current active marking worklist.
5) Visit the object. Newly discovered objects will be pushed
   onto the active marking worklist.
6) Attribute the object's size to the native context corresponding
   to the active marking worklist.
   This is done using a new NativeContextStats class.

The main property of the algorithm is that each object for which
the native context could not be inferred is either attributed to
the native context retaining it (through the active marking
worklist) or is not attributed to any native context at all.

Bug: chromium:973627

Change-Id: Ide4ab992275d115279f971d89ace657f4c05e176
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1981491
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65663}
parent 250c58dd
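For orientation, the algorithm above maps directly onto the two marking
loops changed below (MarkCompactCollector::ProcessMarkingWorklist and
ConcurrentMarking::Run). The following is a condensed sketch of that
loop; the free-standing function and its parameter passing are
illustrative assumptions, while MarkingWorklists, NativeContextInferrer,
NativeContextStats, and their methods are taken from the diff:

// Sketch only: the real code threads this state through the collectors.
size_t DrainPerContextWorklists(MarkingWorklists* worklists,
                                NativeContextInferrer* inferrer,
                                NativeContextStats* stats,
                                MarkingVisitor* visitor) {
  size_t bytes_processed = 0;
  HeapObject object;
  while (worklists->Pop(&object)) {  // Step 1: pop an object.
    Map map = object.map();
    Address context;
    // Steps 2-4: switch to the worklist of the inferred native context;
    // if inference fails, the current active worklist is kept.
    if (inferrer->Infer(map, object, &context)) {
      worklists->SwitchToContext(context);
    }
    // Step 5: visit the object; newly discovered objects are pushed
    // onto the active worklist.
    size_t visited_size = visitor->Visit(map, object);
    // Step 6: attribute the visited bytes to the native context of the
    // active worklist (kSharedContext stands for "no context").
    stats->IncrementSize(worklists->Context(), visited_size);
    bytes_processed += visited_size;
  }
  return bytes_processed;
}
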
@@ -2297,6 +2297,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/marking-worklist.h",
"src/heap/marking.cc",
"src/heap/marking.h",
"src/heap/memory-measurement-inl.h",
"src/heap/memory-measurement.cc",
"src/heap/memory-measurement.h",
"src/heap/memory-reducer.cc",
@@ -955,6 +955,8 @@ DEFINE_IMPLICATION(stress_flush_bytecode, flush_bytecode)
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
DEFINE_BOOL(stress_per_context_marking_worklist, false,
"Use per-context worklist for marking")
DEFINE_BOOL(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
"to 64 words")
@@ -17,6 +17,8 @@
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
@@ -392,6 +394,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
task_state->mark_compact_epoch, Heap::GetBytecodeFlushMode(),
heap_->local_embedder_heap_tracer()->InUse(), task_state->is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
task_state->native_context_inferrer;
NativeContextStats& native_context_stats = task_state->native_context_stats;
double time_ms;
size_t marked_bytes = 0;
if (FLAG_trace_concurrent_marking) {
@@ -412,7 +417,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
}
}
bool is_per_context_mode = marking_worklists.IsPerContextMode();
bool done = false;
while (!done) {
size_t current_marked_bytes = 0;
@@ -435,7 +440,18 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
marking_worklists.PushOnHold(object);
} else {
Map map = object.synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer.Infer(map, object, &context)) {
marking_worklists.SwitchToContext(context);
}
}
size_t visited_size = visitor.Visit(map, object);
if (is_per_context_mode) {
native_context_stats.IncrementSize(marking_worklists.Context(),
visited_size);
}
current_marked_bytes += visited_size;
}
}
marked_bytes += current_marked_bytes;
@@ -590,6 +606,13 @@ bool ConcurrentMarking::IsStopped() {
return pending_task_count_ == 0;
}
void ConcurrentMarking::FlushNativeContexts(NativeContextStats* main_stats) {
for (int i = 1; i <= total_task_count_; i++) {
main_stats->Merge(task_state_[i].native_context_stats);
task_state_[i].native_context_stats.Clear();
}
}
void ConcurrentMarking::FlushMemoryChunkData(
MajorNonAtomicMarkingState* marking_state) {
DCHECK_EQ(pending_task_count_, 0);
@@ -13,6 +13,7 @@
#include "src/base/platform/mutex.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
@@ -82,6 +83,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
bool Stop(StopRequest stop_request);
void RescheduleTasksIfNeeded();
// Flushes native context sizes to the given table of the main thread.
void FlushNativeContexts(NativeContextStats* main_stats);
// Flushes memory chunk data using the given marking state.
void FlushMemoryChunkData(MajorNonAtomicMarkingState* marking_state);
// This function is called for a new space page that was cleared after
@@ -103,10 +106,12 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
// The main thread sets this flag to true when it wants the concurrent
// marker to give up the worker thread.
std::atomic<bool> preemption_request;
MemoryChunkDataMap memory_chunk_data;
size_t marked_bytes = 0;
unsigned mark_compact_epoch;
bool is_forced_gc;
MemoryChunkDataMap memory_chunk_data;
NativeContextInferrer native_context_inferrer;
NativeContextStats native_context_stats;
char cache_line_padding[64];
};
class Task;
@@ -6006,6 +6006,17 @@ size_t Heap::NumberOfNativeContexts() {
return result;
}
std::vector<Address> Heap::FindNativeContexts() {
std::vector<Address> result;
Object context = native_contexts_list();
while (!context.IsUndefined(isolate())) {
Context native_context = Context::cast(context);
result.push_back(native_context.ptr());
context = native_context.next_context_link();
}
return result;
}
size_t Heap::NumberOfDetachedContexts() {
// The detached_contexts() array has two entries per detached context.
return detached_contexts().length() / 2;
@@ -581,6 +581,7 @@ class Heap {
Handle<JSPromise> MeasureMemory(Handle<NativeContext> context,
v8::MeasureMemoryMode mode);
std::vector<Address> FindNativeContexts();
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@@ -23,6 +23,8 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
@@ -498,7 +500,11 @@ bool MarkCompactCollector::StartCompaction() {
}
void MarkCompactCollector::StartMarking() {
marking_worklists_holder()->CreateContextWorklists(std::vector<Address>());
std::vector<Address> contexts;
if (FLAG_stress_per_context_marking_worklist) {
contexts = heap()->FindNativeContexts();
}
marking_worklists_holder()->CreateContextWorklists(contexts);
marking_worklists_ = std::make_unique<MarkingWorklists>(
kMainThreadTask, marking_worklists_holder());
marking_visitor_ = std::make_unique<MarkingVisitor>(
@@ -854,6 +860,7 @@ void MarkCompactCollector::FinishConcurrentMarking(
heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushMemoryChunkData(
non_atomic_marking_state());
heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
}
}
@@ -885,6 +892,7 @@ void MarkCompactCollector::Finish() {
marking_visitor_.reset();
marking_worklists_.reset();
marking_worklists_holder_.ReleaseContextWorklists();
native_context_stats_.Clear();
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
@@ -1770,6 +1778,7 @@ template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
HeapObject object;
size_t bytes_processed = 0;
bool is_per_context_mode = marking_worklists()->IsPerContextMode();
while (marking_worklists()->Pop(&object) ||
marking_worklists()->PopOnHold(&object)) {
// Left trimming may result in grey or black filler objects on the marking
@@ -1794,7 +1803,19 @@ size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
bytes_processed += marking_visitor_->Visit(object.map(), object);
Map map = object.map();
if (is_per_context_mode) {
Address context;
if (native_context_inferrer_.Infer(map, object, &context)) {
marking_worklists()->SwitchToContext(context);
}
}
size_t visited_size = marking_visitor_->Visit(map, object);
if (is_per_context_mode) {
native_context_stats_.IncrementSize(marking_worklists()->Context(),
visited_size);
}
bytes_processed += visited_size;
if (bytes_to_process && bytes_processed >= bytes_to_process) {
break;
}
@@ -12,6 +12,7 @@
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"
@@ -770,6 +771,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::unique_ptr<MarkingVisitor> marking_visitor_;
std::unique_ptr<MarkingWorklists> marking_worklists_;
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
// Candidates for pages that should be evacuated.
std::vector<Page*> evacuation_candidates_;
@@ -89,10 +89,10 @@ MarkingWorklists::MarkingWorklists(int task_id, MarkingWorklistsHolder* holder)
active_(shared_),
active_context_(kSharedContext),
task_id_(task_id),
per_context_mode_(false),
is_per_context_mode_(false),
context_worklists_(holder->context_worklists()) {
if (!context_worklists_.empty()) {
per_context_mode_ = true;
is_per_context_mode_ = true;
context_worklists_.push_back({kSharedContext, shared_});
worklist_by_context_.reserve(context_worklists_.size());
for (auto& cw : context_worklists_) {
@@ -105,7 +105,7 @@ void MarkingWorklists::FlushToGlobal() {
shared_->FlushToGlobal(task_id_);
on_hold_->FlushToGlobal(task_id_);
embedder_->FlushToGlobal(task_id_);
if (per_context_mode_) {
if (is_per_context_mode_) {
for (auto& cw : context_worklists_) {
cw.worklist->FlushToGlobal(task_id_);
}
@@ -120,7 +120,7 @@ bool MarkingWorklists::IsEmpty() {
!active_->IsGlobalPoolEmpty() || !on_hold_->IsGlobalPoolEmpty()) {
return false;
}
if (!per_context_mode_) {
if (!is_per_context_mode_) {
DCHECK_EQ(active_, shared_);
return true;
}
@@ -143,7 +143,7 @@ void MarkingWorklists::ShareWorkIfGlobalPoolIsEmpty() {
if (!shared_->IsLocalEmpty(task_id_) && shared_->IsGlobalPoolEmpty()) {
shared_->FlushToGlobal(task_id_);
}
if (per_context_mode_ && shared_ != active_) {
if (is_per_context_mode_ && shared_ != active_) {
if (!active_->IsLocalEmpty(task_id_) && active_->IsGlobalPoolEmpty()) {
active_->FlushToGlobal(task_id_);
}
@@ -156,7 +156,7 @@ void MarkingWorklists::MergeOnHold() {
}
bool MarkingWorklists::PopContext(HeapObject* object) {
DCHECK(per_context_mode_);
DCHECK(is_per_context_mode_);
// As an optimization we first check only the local segments to avoid locks.
for (auto& cw : context_worklists_) {
if (!cw.worklist->IsLocalEmpty(task_id_)) {
@@ -173,6 +173,8 @@ bool MarkingWorklists::PopContext(HeapObject* object) {
return true;
}
}
// All worklists are empty. Switch to the default shared worklist.
SwitchToShared();
return false;
}
@@ -134,7 +134,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
bool Pop(HeapObject* object) {
if (active_->Pop(task_id_, object)) return true;
if (!per_context_mode_) return false;
if (!is_per_context_mode_) return false;
// The active worklist is empty. Find any other non-empty worklist and
// switch the active worklist to it.
return PopContext(object);
@@ -176,6 +176,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
active_context_ = kSharedContext;
active_ = shared_;
}
bool IsPerContextMode() { return is_per_context_mode_; }
private:
const Address kSharedContext = 0;
@@ -187,7 +188,7 @@
MarkingWorklist* active_;
Address active_context_;
int task_id_;
bool per_context_mode_;
bool is_per_context_mode_;
// Per-context worklists. For simplicity we treat the shared worklist as
// the worklist of dummy kSharedContext.
std::vector<ContextWorklistPair> context_worklists_;
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_MEMORY_MEASUREMENT_INL_H_
#define V8_HEAP_MEMORY_MEASUREMENT_INL_H_
#include "src/heap/memory-measurement.h"
#include "src/objects/contexts-inl.h"
#include "src/objects/contexts.h"
#include "src/objects/map-inl.h"
#include "src/objects/map.h"
namespace v8 {
namespace internal {
bool NativeContextInferrer::Infer(Map map, HeapObject object,
Address* native_context) {
switch (map.visitor_id()) {
case kVisitContext:
*native_context = Context::cast(object).native_context().ptr();
return true;
case kVisitNativeContext:
*native_context = object.ptr();
return true;
case kVisitJSFunction:
return InferForJSFunction(map, JSFunction::cast(object), native_context);
case kVisitJSApiObject:
case kVisitJSArrayBuffer:
case kVisitJSObject:
case kVisitJSObjectFast:
case kVisitJSTypedArray:
case kVisitJSWeakCollection:
return InferForJSObject(map, JSObject::cast(object), native_context);
default:
return false;
}
}
} // namespace internal
} // namespace v8
#endif // V8_HEAP_MEMORY_MEASUREMENT_INL_H_
@@ -76,5 +76,35 @@ Handle<JSPromise> MemoryMeasurement::EnqueueRequest(
return promise;
}
bool NativeContextInferrer::InferForJSFunction(Map map, JSFunction function,
Address* native_context) {
if (function.has_context()) {
*native_context = function.context().native_context().ptr();
return true;
}
return false;
}
bool NativeContextInferrer::InferForJSObject(Map map, JSObject object,
Address* native_context) {
if (map.instance_type() == JS_GLOBAL_OBJECT_TYPE) {
Object maybe_context =
JSGlobalObject::cast(object).native_context_unchecked();
if (maybe_context.IsNativeContext()) {
*native_context = maybe_context.ptr();
return true;
}
}
return false;
}
void NativeContextStats::Clear() { size_by_context_.clear(); }
void NativeContextStats::Merge(const NativeContextStats& other) {
for (const auto& it : other.size_by_context_) {
size_by_context_[it.first] += it.second;
}
}
} // namespace internal
} // namespace v8
@@ -5,7 +5,11 @@
#ifndef V8_HEAP_MEMORY_MEASUREMENT_H_
#define V8_HEAP_MEMORY_MEASUREMENT_H_
#include <unordered_map>
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/map.h"
#include "src/objects/objects.h"
namespace v8 {
@@ -18,11 +22,41 @@ class V8_EXPORT_PRIVATE MemoryMeasurement {
explicit MemoryMeasurement(Isolate* isolate);
Handle<JSPromise> EnqueueRequest(Handle<NativeContext> context,
v8::MeasureMemoryMode mode);
private:
Isolate* isolate_;
};
// Infers the native context for some of the heap objects.
class V8_EXPORT_PRIVATE NativeContextInferrer {
public:
V8_INLINE bool Infer(Map map, HeapObject object, Address* native_context);
private:
bool InferForJSFunction(Map map, JSFunction function,
Address* native_context);
bool InferForJSObject(Map map, JSObject object, Address* native_context);
};
// Maintains mapping from native contexts to their sizes.
class V8_EXPORT_PRIVATE NativeContextStats {
public:
void IncrementSize(Address context, size_t size) {
size_by_context_[context] += size;
}
size_t Get(Address context) const {
const auto it = size_by_context_.find(context);
if (it == size_by_context_.end()) return 0;
return it->second;
}
void Clear();
void Merge(const NativeContextStats& other);
private:
std::unordered_map<Address, size_t> size_by_context_;
};
} // namespace internal
} // namespace v8
@@ -456,6 +456,10 @@ ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
return TaggedField<Object, kNativeContextOffset>::load(isolate, *this);
}
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
return FeedbackVector::cast(raw_feedback_cell().value());
@@ -1225,6 +1225,10 @@ class JSGlobalObject : public JSSpecialObject {
inline bool IsDetached();
// May be called by the concurrent GC when the global object is not
// fully initialized.
DECL_GETTER(native_context_unchecked, Object)
// Dispatched behavior.
DECL_PRINTER(JSGlobalObject)
DECL_VERIFIER(JSGlobalObject)
@@ -145,6 +145,7 @@ v8_source_set("cctest_sources") {
"heap/test-iterators.cc",
"heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-memory-measurement.cc",
"heap/test-page-promotion.cc",
"heap/test-spaces.cc",
"heap/test-unmapper.cc",
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
namespace heap {
namespace {
Handle<NativeContext> GetNativeContext(Isolate* isolate,
v8::Local<v8::Context> v8_context) {
Handle<Context> context = v8::Utils::OpenHandle(*v8_context);
return handle(context->native_context(), isolate);
}
} // anonymous namespace
TEST(NativeContextInferrerGlobalObject) {
LocalContext env;
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
Handle<NativeContext> native_context = GetNativeContext(isolate, env.local());
Handle<JSGlobalObject> global =
handle(native_context->global_object(), isolate);
NativeContextInferrer inferrer;
Address inferred_context;
CHECK(inferrer.Infer(global->map(), *global, &inferred_context));
CHECK_EQ(native_context->ptr(), inferred_context);
}
TEST(NativeContextInferrerJSFunction) {
LocalContext env;
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Handle<NativeContext> native_context = GetNativeContext(isolate, env.local());
v8::Local<v8::Value> result = CompileRun("(function () { return 1; })");
Handle<Object> object = Utils::OpenHandle(*result);
Handle<HeapObject> function = Handle<HeapObject>::cast(object);
NativeContextInferrer inferrer;
Address inferred_context;
CHECK(inferrer.Infer(function->map(), *function, &inferred_context));
CHECK_EQ(native_context->ptr(), inferred_context);
}
TEST(NativeContextStatsMerge) {
NativeContextStats stats1, stats2;
Address object = 0;
stats1.IncrementSize(object, 10);
stats2.IncrementSize(object, 20);
stats1.Merge(stats2);
CHECK_EQ(30, stats1.Get(object));
}
} // namespace heap
} // namespace internal
} // namespace v8