Commit 38f39a01 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Introduce per-context marking worklists

These worklists are needed for accounting of objects retained by
native contexts for the new memory measurement API.

Bug: chromium:973627
Change-Id: I354c5ebbbac11da4d01800164e15b94a93aa654c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1943158
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65542}
parent 4671cb56
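
At a glance, the worklist lifecycle this patch introduces looks roughly like the following sketch, assembled from the diff below. The two context addresses are placeholders; a real caller would pass the addresses of live native contexts.

MarkingWorklistsHolder holder;
// Start of marking (cf. MarkCompactCollector::StartMarking below): one extra
// worklist per native context that needs object size accounting.
holder.CreateContextWorklists({context1, context2});
// Each marker gets a thread-local view; the shared, on_hold, and embedder
// worklists keep working exactly as before.
MarkingWorklists main_view(kMainThreadTask, &holder);
// ... markers drain all worklists ...
// End of marking (cf. MarkCompactCollector::Finish below): destroy the views
// first, then release the per-context worklists.
holder.ReleaseContextWorklists();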
src/heap/heap.cc
@@ -3339,7 +3339,8 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
             local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
     FinalizeIncrementalMarkingIncrementally(gc_reason);
   } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector()->marking_worklists()->IsEmpty() &&
+             (incremental_marking()->IsMarking() &&
+              mark_compact_collector()->marking_worklists()->IsEmpty() &&
               local_embedder_heap_tracer()
                   ->ShouldFinalizeIncrementalMarking())) {
     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
@@ -5022,8 +5023,7 @@ void Heap::SetUp() {
   scavenger_collector_.reset(new ScavengerCollector(this));
   incremental_marking_.reset(
-      new IncrementalMarking(this, mark_compact_collector_->marking_worklists(),
-                             mark_compact_collector_->weak_objects()));
+      new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
     concurrent_marking_.reset(new ConcurrentMarking(
src/heap/incremental-marking.cc
@@ -45,11 +45,9 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
 }

 IncrementalMarking::IncrementalMarking(Heap* heap,
-                                       MarkingWorklists* marking_worklists,
                                        WeakObjects* weak_objects)
     : heap_(heap),
       collector_(heap->mark_compact_collector()),
-      marking_worklists_(marking_worklists),
       weak_objects_(weak_objects),
       initial_old_generation_size_(0),
       bytes_marked_(0),
@@ -64,7 +62,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap,
       request_type_(NONE),
       new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
       old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
-  DCHECK_NOT_NULL(marking_worklists_);
   SetState(STOPPED);
 }
@@ -1083,18 +1080,20 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
         heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
       }
     }
-  }
-  if (FLAG_concurrent_marking) {
-    marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
-    heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+    if (FLAG_concurrent_marking) {
+      marking_worklists()->ShareWorkIfGlobalPoolIsEmpty();
+      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
+    }
   }
   double end = heap_->MonotonicallyIncreasingTimeInMs();
   double duration = (end - start);
-  // Note that we report zero bytes here when sweeping was in progress or
-  // when we just started incremental marking. In these cases we did not
-  // process the marking deque.
-  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  if (state_ == MARKING) {
+    // Note that we report zero bytes here when sweeping was in progress or
+    // when we just started incremental marking. In these cases we did not
+    // process the marking deque.
+    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  }
   if (FLAG_trace_incremental_marking) {
     heap_->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Step %s %zuKB (%zuKB) in %.1f\n",
src/heap/incremental-marking.h
@@ -86,8 +86,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
 #endif

-  IncrementalMarking(Heap* heap, MarkingWorklists* marking_worklists,
-                     WeakObjects* weak_objects);
+  IncrementalMarking(Heap* heap, WeakObjects* weak_objects);

   MarkingState* marking_state() { return &marking_state_; }
@@ -227,7 +226,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
     }
   }

-  MarkingWorklists* marking_worklists() const { return marking_worklists_; }
+  MarkingWorklists* marking_worklists() const {
+    return collector_->marking_worklists();
+  }

   void Deactivate();
@@ -302,7 +303,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   Heap* const heap_;
   MarkCompactCollector* const collector_;
-  MarkingWorklists* const marking_worklists_;
   WeakObjects* weak_objects_;
   double start_time_ms_;
src/heap/mark-compact.cc
@@ -429,7 +429,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       compacting_(false),
       black_allocation_(false),
       have_code_to_deoptimize_(false),
-      marking_worklists_(kMainThreadTask, marking_worklists_holder()),
       sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
   old_to_new_slots_ = -1;
 }
@@ -499,6 +498,9 @@ bool MarkCompactCollector::StartCompaction() {
 }

 void MarkCompactCollector::StartMarking() {
+  marking_worklists_holder()->CreateContextWorklists(std::vector<Address>());
+  marking_worklists_ = std::make_unique<MarkingWorklists>(
+      kMainThreadTask, marking_worklists_holder());
   marking_visitor_ = std::make_unique<MarkingVisitor>(
       marking_state(), marking_worklists(), weak_objects(), heap_, epoch(),
       Heap::GetBytecodeFlushMode(),
@@ -880,6 +882,10 @@ void MarkCompactCollector::Finish() {
   heap()->VerifyCountersBeforeConcurrentSweeping();
 #endif
   marking_visitor_.reset();
+  marking_worklists_.reset();
+  marking_worklists_holder_.ReleaseContextWorklists();
+
   CHECK(weak_objects_.current_ephemerons.IsEmpty());
   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
   weak_objects_.next_ephemerons.Clear();
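
Note that StartMarking registers an empty context list, so per-context mode stays disabled in this change, presumably until a follow-up supplies real native-context addresses. In that configuration the new code degenerates to the old single-worklist behavior; a minimal sketch:

MarkingWorklistsHolder holder;
holder.CreateContextWorklists(std::vector<Address>());  // no contexts
MarkingWorklists worklists(kMainThreadTask, &holder);
// context_worklists_ is empty, so per_context_mode_ remains false and
// Push/Pop operate directly on the shared worklist, as before this patch.
holder.ReleaseContextWorklists();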
src/heap/mark-compact.h
@@ -533,7 +533,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   MarkingWorklistsHolder* marking_worklists_holder() {
     return &marking_worklists_holder_;
   }

-  MarkingWorklists* marking_worklists() { return &marking_worklists_; }
+  MarkingWorklists* marking_worklists() { return marking_worklists_.get(); }

   WeakObjects* weak_objects() { return &weak_objects_; }
@@ -764,12 +764,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   bool have_code_to_deoptimize_;

   MarkingWorklistsHolder marking_worklists_holder_;
-  MarkingWorklists marking_worklists_;
   WeakObjects weak_objects_;
   EphemeronMarking ephemeron_marking_;

   std::unique_ptr<MarkingVisitor> marking_visitor_;
+  std::unique_ptr<MarkingWorklists> marking_worklists_;

   // Candidates for pages that should be evacuated.
   std::vector<Page*> evacuation_candidates_;
src/heap/marking-worklist.cc
@@ -17,10 +17,19 @@
 namespace v8 {
 namespace internal {

+MarkingWorklistsHolder::~MarkingWorklistsHolder() {
+  DCHECK(worklists_.empty());
+  DCHECK(context_worklists_.empty());
+}
+
 void MarkingWorklistsHolder::Clear() {
   shared_.Clear();
   on_hold_.Clear();
   embedder_.Clear();
+  for (auto cw : context_worklists_) {
+    cw.worklist->Clear();
+  }
+  ReleaseContextWorklists();
 }

 void MarkingWorklistsHolder::Print() {
@@ -28,6 +37,24 @@ void MarkingWorklistsHolder::Print() {
   PrintWorklist("on_hold", &on_hold_);
 }

+void MarkingWorklistsHolder::CreateContextWorklists(
+    const std::vector<Address>& contexts) {
+  DCHECK(worklists_.empty());
+  DCHECK(context_worklists_.empty());
+  worklists_.reserve(contexts.size());
+  context_worklists_.reserve(contexts.size());
+  for (Address context : contexts) {
+    MarkingWorklist* worklist = new MarkingWorklist();
+    worklists_.push_back(std::unique_ptr<MarkingWorklist>(worklist));
+    context_worklists_.push_back({context, worklist});
+  }
+}
+
+void MarkingWorklistsHolder::ReleaseContextWorklists() {
+  context_worklists_.clear();
+  worklists_.clear();
+}
+
 void MarkingWorklistsHolder::PrintWorklist(const char* worklist_name,
                                            MarkingWorklist* worklist) {
 #ifdef DEBUG
@@ -59,20 +86,53 @@ MarkingWorklists::MarkingWorklists(int task_id, MarkingWorklistsHolder* holder)
     : shared_(holder->shared()),
       on_hold_(holder->on_hold()),
       embedder_(holder->embedder()),
-      task_id_(task_id) {}
+      active_(shared_),
+      active_context_(kSharedContext),
+      task_id_(task_id),
+      per_context_mode_(false),
+      context_worklists_(holder->context_worklists()) {
+  if (!context_worklists_.empty()) {
+    per_context_mode_ = true;
+    context_worklists_.push_back({kSharedContext, shared_});
+    worklist_by_context_.reserve(context_worklists_.size());
+    for (auto& cw : context_worklists_) {
+      worklist_by_context_[cw.context] = cw.worklist;
+    }
+  }
+}

 void MarkingWorklists::FlushToGlobal() {
   shared_->FlushToGlobal(task_id_);
   on_hold_->FlushToGlobal(task_id_);
   embedder_->FlushToGlobal(task_id_);
+  if (per_context_mode_) {
+    for (auto& cw : context_worklists_) {
+      cw.worklist->FlushToGlobal(task_id_);
+    }
+  }
 }

 bool MarkingWorklists::IsEmpty() {
   // This function checks the on_hold_ worklist, so it works only for the main
   // thread.
   DCHECK_EQ(kMainThreadTask, task_id_);
-  return shared_->IsLocalEmpty(task_id_) && on_hold_->IsLocalEmpty(task_id_) &&
-         shared_->IsGlobalPoolEmpty() && on_hold_->IsGlobalPoolEmpty();
+  if (!active_->IsLocalEmpty(task_id_) || !on_hold_->IsLocalEmpty(task_id_) ||
+      !active_->IsGlobalPoolEmpty() || !on_hold_->IsGlobalPoolEmpty()) {
+    return false;
+  }
+  if (!per_context_mode_) {
+    DCHECK_EQ(active_, shared_);
+    return true;
+  }
+  for (auto& cw : context_worklists_) {
+    if (!cw.worklist->IsLocalEmpty(task_id_) ||
+        !cw.worklist->IsGlobalPoolEmpty()) {
+      active_ = cw.worklist;
+      active_context_ = cw.context;
+      return false;
+    }
+  }
+  return true;
 }

 bool MarkingWorklists::IsEmbedderEmpty() {
@@ -83,6 +143,11 @@ void MarkingWorklists::ShareWorkIfGlobalPoolIsEmpty() {
   if (!shared_->IsLocalEmpty(task_id_) && shared_->IsGlobalPoolEmpty()) {
     shared_->FlushToGlobal(task_id_);
   }
+  if (per_context_mode_ && shared_ != active_) {
+    if (!active_->IsLocalEmpty(task_id_) && active_->IsGlobalPoolEmpty()) {
+      active_->FlushToGlobal(task_id_);
+    }
+  }
 }

 void MarkingWorklists::MergeOnHold() {
@@ -90,5 +155,40 @@ void MarkingWorklists::MergeOnHold() {
   shared_->MergeGlobalPool(on_hold_);
 }

+bool MarkingWorklists::PopContext(HeapObject* object) {
+  DCHECK(per_context_mode_);
+  // As an optimization we first check only the local segments to avoid locks.
+  for (auto& cw : context_worklists_) {
+    if (!cw.worklist->IsLocalEmpty(task_id_)) {
+      active_ = cw.worklist;
+      active_context_ = cw.context;
+      return active_->Pop(task_id_, object);
+    }
+  }
+  // All local segments are empty. Check global segments.
+  for (auto& cw : context_worklists_) {
+    if (cw.worklist->Pop(task_id_, object)) {
+      active_ = cw.worklist;
+      active_context_ = cw.context;
+      return true;
+    }
+  }
+  return false;
+}
+
+Address MarkingWorklists::SwitchToContextSlow(Address context) {
+  const auto& it = worklist_by_context_.find(context);
+  if (V8_UNLIKELY(it == worklist_by_context_.end())) {
+    // This context was created during marking, so we don't have a worklist
+    // for it. Use the shared worklist.
+    active_ = shared_;
+    active_context_ = kSharedContext;
+  } else {
+    active_ = it->second;
+    active_context_ = context;
+  }
+  return active_context_;
+}
+
 }  // namespace internal
 }  // namespace v8
src/heap/marking-worklist.h
@@ -5,6 +5,9 @@
 #ifndef V8_HEAP_MARKING_WORKLIST_H_
 #define V8_HEAP_MARKING_WORKLIST_H_

+#include <unordered_map>
+#include <vector>
+
 #include "src/heap/marking.h"
 #include "src/heap/worklist.h"
 #include "src/objects/heap-object.h"
@@ -18,9 +21,49 @@ using EmbedderTracingWorklist = Worklist<HeapObject, 16>;
 // The index of the main thread task used by concurrent/parallel GC.
 const int kMainThreadTask = 0;

+// We piggyback on marking to compute the object sizes per native context that
+// are needed for the new memory measurement API. The algorithm works as
+// follows:
+// 1) At the start of marking we create a marking worklist for each context.
+//    The existing shared, on_hold, and embedder worklists continue to work
+//    as they did before, but they hold objects that are not attributed to any
+//    context yet.
+// 2) Each marker has an active worklist where it pushes newly discovered
+//    objects. Initially the shared worklist is set as active for all markers.
+// 3) When a marker pops an object from the active worklist:
+//    a) It checks if the object has a known context (e.g. JSObjects, Maps,
+//       Contexts know the context they belong to). If that's the case, then
+//       the marker changes its active worklist to the worklist corresponding
+//       to the context of the object.
+//    b) It accounts the size of the object to the active context.
+//    c) It visits all pointers in the object and pushes new objects onto the
+//       active worklist.
+// 4) When the active worklist becomes empty the marker selects any other
+//    non-empty worklist as the active worklist.
+// 5) The write barrier pushes onto the shared worklist.
+//
+// The main invariant for context worklists:
+//    If object X is in the worklist of context C, then either
+//    a) X has a context and that context is C.
+//    b) X is retained by an object Y that has context C.
+//
+// The algorithm allows us to attribute context-independent objects such as
+// strings, numbers, and FixedArrays to their retaining contexts. The
+// algorithm is not precise for context-independent objects that are shared
+// between multiple contexts. Such objects may be attributed to any retaining
+// context.

+// Named pair of native context address and its marking worklist.
+// Since native contexts are allocated in the old generation, their addresses
+// are stable across Scavenges and stay valid throughout the marking phase.
+struct ContextWorklistPair {
+  Address context;
+  MarkingWorklist* worklist;
+};
+
 // A helper class that owns all marking worklists.
-class MarkingWorklistsHolder {
+class V8_EXPORT_PRIVATE MarkingWorklistsHolder {
  public:
+  ~MarkingWorklistsHolder();
+
   // Calls the specified callback on each element of the deques and replaces
   // the element with the result of the callback. If the callback returns
   // nullptr then the element is removed from the deque.
@@ -30,12 +73,27 @@ class MarkingWorklistsHolder {
     shared_.Update(callback);
     on_hold_.Update(callback);
     embedder_.Update(callback);
+    for (auto cw : context_worklists_) {
+      cw.worklist->Update(callback);
+    }
   }

   MarkingWorklist* shared() { return &shared_; }
   MarkingWorklist* on_hold() { return &on_hold_; }
   EmbedderTracingWorklist* embedder() { return &embedder_; }

+  // A list of (context, worklist) pairs that were set up at the start of
+  // marking by CreateContextWorklists.
+  const std::vector<ContextWorklistPair>& context_worklists() {
+    return context_worklists_;
+  }
+
+  // This should be invoked at the start of marking with the list of contexts
+  // that require object size accounting.
+  void CreateContextWorklists(const std::vector<Address>& contexts);
+
+  // This should be invoked at the end of marking. All worklists must be
+  // empty at that point.
+  void ReleaseContextWorklists();
+
   void Clear();
   void Print();
@@ -56,6 +114,11 @@ class MarkingWorklistsHolder {
   // these objects need to be handed over to the embedder to find the full
   // transitive closure.
   EmbedderTracingWorklist embedder_;
+
+  // Per-context worklists.
+  std::vector<ContextWorklistPair> context_worklists_;
+  // This is used only for lifetime management of the per-context worklists.
+  std::vector<std::unique_ptr<MarkingWorklist>> worklists_;
 };

 // A thread-local view of the marking worklists.
@@ -64,24 +127,26 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
   MarkingWorklists(int task_id, MarkingWorklistsHolder* holder);

   void Push(HeapObject object) {
-    bool success = shared_->Push(task_id_, object);
+    bool success = active_->Push(task_id_, object);
     USE(success);
     DCHECK(success);
   }

-  bool Pop(HeapObject* object) { return shared_->Pop(task_id_, object); }
+  bool Pop(HeapObject* object) {
+    if (active_->Pop(task_id_, object)) return true;
+    if (!per_context_mode_) return false;
+    // The active worklist is empty. Find any other non-empty worklist and
+    // switch the active worklist to it.
+    return PopContext(object);
+  }

   void PushOnHold(HeapObject object) {
-    DCHECK_NE(kMainThreadTask, task_id_);
     bool success = on_hold_->Push(task_id_, object);
     USE(success);
     DCHECK(success);
   }

-  bool PopOnHold(HeapObject* object) {
-    DCHECK_EQ(kMainThreadTask, task_id_);
-    return on_hold_->Pop(task_id_, object);
-  }
+  bool PopOnHold(HeapObject* object) { return on_hold_->Pop(task_id_, object); }

   void PushEmbedder(HeapObject object) {
     bool success = embedder_->Push(task_id_, object);
@@ -99,11 +164,34 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
   void MergeOnHold();
   void ShareWorkIfGlobalPoolIsEmpty();

+  // Returns the context of the active worklist.
+  Address Context() { return active_context_; }
+
+  // Switches the active worklist to that of the given context.
+  Address SwitchToContext(Address context) {
+    if (context == active_context_) return context;
+    return SwitchToContextSlow(context);
+  }
+
+  // Switches the active worklist to the shared worklist.
+  void SwitchToShared() {
+    active_context_ = kSharedContext;
+    active_ = shared_;
+  }
+
  private:
+  const Address kSharedContext = 0;
+
+  bool PopContext(HeapObject* object);
+  Address SwitchToContextSlow(Address context);
+
   MarkingWorklist* shared_;
   MarkingWorklist* on_hold_;
   EmbedderTracingWorklist* embedder_;
+  MarkingWorklist* active_;
+  Address active_context_;
   int task_id_;
+  bool per_context_mode_;
+  // Per-context worklists. For simplicity we treat the shared worklist as
+  // the worklist of the dummy kSharedContext.
+  std::vector<ContextWorklistPair> context_worklists_;
+  std::unordered_map<Address, MarkingWorklist*> worklist_by_context_;
 };

 }  // namespace internal
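
To make the stepped algorithm described in the header comment concrete, here is a minimal sketch of a marker loop driving this API. MarkingWorklists, Push, Pop, Context, and SwitchToContext come from this patch; TryGetNativeContext, SizeOf, and VisitPointers are hypothetical stand-ins for the marking visitor's real logic.

// Hypothetical helpers standing in for the real marking visitor.
bool TryGetNativeContext(HeapObject object, Address* out);  // step 3a
size_t SizeOf(HeapObject object);                           // step 3b
template <typename Visitor>
void VisitPointers(HeapObject object, Visitor visitor);     // step 3c

void DriveMarking(MarkingWorklists* worklists,
                  std::unordered_map<Address, size_t>* live_bytes) {
  HeapObject object;
  // Pop transparently switches the active worklist when the current one
  // drains (step 4, implemented by PopContext).
  while (worklists->Pop(&object)) {
    Address context;
    if (TryGetNativeContext(object, &context)) {
      // Step 3a: the object knows its native context; switch to its worklist.
      worklists->SwitchToContext(context);
    }
    // Step 3b: attribute the object's size to the active context
    // (kSharedContext, i.e. address 0, for unattributed objects).
    (*live_bytes)[worklists->Context()] += SizeOf(object);
    // Step 3c: newly discovered objects go onto the active worklist.
    VisitPointers(object,
                  [worklists](HeapObject target) { worklists->Push(target); });
  }
}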
test/unittests/BUILD.gn
@@ -175,6 +175,7 @@ v8_source_set("unittests_sources") {
     "heap/heap-unittest.cc",
     "heap/item-parallel-job-unittest.cc",
     "heap/marking-unittest.cc",
+    "heap/marking-worklist-unittest.cc",
     "heap/memory-reducer-unittest.cc",
     "heap/object-stats-unittest.cc",
     "heap/scavenge-job-unittest.cc",
test/unittests/heap/marking-worklist-unittest.cc (new file)
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <cmath>
#include <limits>

#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking-worklist.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

using MarkingWorklistTest = TestWithContext;

TEST_F(MarkingWorklistTest, PushPop) {
  MarkingWorklistsHolder holder;
  MarkingWorklists worklists(kMainThreadTask, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worklists.Push(pushed_object);
  HeapObject popped_object;
  EXPECT_TRUE(worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
}

TEST_F(MarkingWorklistTest, PushPopOnHold) {
  MarkingWorklistsHolder holder;
  MarkingWorklists worklists(kMainThreadTask, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worklists.PushOnHold(pushed_object);
  HeapObject popped_object;
  EXPECT_TRUE(worklists.PopOnHold(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
}

TEST_F(MarkingWorklistTest, PushPopEmbedder) {
  MarkingWorklistsHolder holder;
  MarkingWorklists worklists(kMainThreadTask, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worklists.PushEmbedder(pushed_object);
  HeapObject popped_object;
  EXPECT_TRUE(worklists.PopEmbedder(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
}

TEST_F(MarkingWorklistTest, MergeOnHold) {
  MarkingWorklistsHolder holder;
  MarkingWorklists main_worklists(kMainThreadTask, &holder);
  MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worker_worklists.PushOnHold(pushed_object);
  worker_worklists.FlushToGlobal();
  main_worklists.MergeOnHold();
  HeapObject popped_object;
  EXPECT_TRUE(main_worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
}

TEST_F(MarkingWorklistTest, ShareWorkIfGlobalPoolIsEmpty) {
  MarkingWorklistsHolder holder;
  MarkingWorklists main_worklists(kMainThreadTask, &holder);
  MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  main_worklists.Push(pushed_object);
  main_worklists.ShareWorkIfGlobalPoolIsEmpty();
  HeapObject popped_object;
  EXPECT_TRUE(worker_worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
}

TEST_F(MarkingWorklistTest, ContextWorklistsPushPop) {
  const Address context = 0xabcdef;
  MarkingWorklistsHolder holder;
  holder.CreateContextWorklists({context});
  MarkingWorklists worklists(kMainThreadTask, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worklists.SwitchToContext(context);
  worklists.Push(pushed_object);
  worklists.SwitchToShared();
  HeapObject popped_object;
  EXPECT_TRUE(worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
  holder.ReleaseContextWorklists();
}

TEST_F(MarkingWorklistTest, ContextWorklistsEmpty) {
  const Address context = 0xabcdef;
  MarkingWorklistsHolder holder;
  holder.CreateContextWorklists({context});
  MarkingWorklists worklists(kMainThreadTask, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  worklists.SwitchToContext(context);
  worklists.Push(pushed_object);
  EXPECT_FALSE(worklists.IsEmpty());
  worklists.SwitchToShared();
  EXPECT_FALSE(worklists.IsEmpty());
  HeapObject popped_object;
  EXPECT_TRUE(worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
  EXPECT_TRUE(worklists.IsEmpty());
  holder.ReleaseContextWorklists();
}

TEST_F(MarkingWorklistTest, ContextWorklistCrossTask) {
  const Address context1 = 0x1abcdef;
  const Address context2 = 0x2abcdef;
  MarkingWorklistsHolder holder;
  holder.CreateContextWorklists({context1, context2});
  MarkingWorklists main_worklists(kMainThreadTask, &holder);
  MarkingWorklists worker_worklists(kMainThreadTask + 1, &holder);
  HeapObject pushed_object =
      ReadOnlyRoots(i_isolate()->heap()).undefined_value();
  main_worklists.SwitchToContext(context1);
  main_worklists.Push(pushed_object);
  main_worklists.ShareWorkIfGlobalPoolIsEmpty();
  worker_worklists.SwitchToContext(context2);
  HeapObject popped_object;
  EXPECT_TRUE(worker_worklists.Pop(&popped_object));
  EXPECT_EQ(popped_object, pushed_object);
  EXPECT_EQ(context1, worker_worklists.Context());
  holder.ReleaseContextWorklists();
}

}  // namespace internal
}  // namespace v8