Commit 55009830 authored by Omer Katz, committed by Commit Bot

cppgc, heap: Merge worklist implementations

Bug: chromium:1056170
Change-Id: Ibf561b663c74f9448139fd99945e5f4aea26419b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2390776
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69774}
parent 6f0094d8
......@@ -4251,6 +4251,7 @@ v8_source_set("v8_cppgc_shared") {
sources = [
"src/heap/base/stack.cc",
"src/heap/base/stack.h",
"src/heap/base/worklist.h",
]
if (is_clang || !is_win) {
......@@ -4379,7 +4380,6 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/virtual-memory.cc",
"src/heap/cppgc/virtual-memory.h",
"src/heap/cppgc/visitor.cc",
"src/heap/cppgc/worklist.h",
"src/heap/cppgc/write-barrier.cc",
]
......@@ -4762,6 +4762,15 @@ if (is_component_build) {
configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
}
v8_component("v8_cppgc_shared_for_testing") {
testonly = true
public_deps = [ ":v8_cppgc_shared" ]
configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
}
} else {
group("v8") {
public_deps = [
......@@ -4799,6 +4808,14 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
group("v8_cppgc_shared_for_testing") {
testonly = true
public_deps = [ ":v8_cppgc_shared" ]
public_configs = [ ":external_config" ]
}
}
v8_executable("d8") {
......
......@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_CPPGC_WORKLIST_H_
#define V8_HEAP_CPPGC_WORKLIST_H_
#ifndef V8_HEAP_BASE_WORKLIST_H_
#define V8_HEAP_BASE_WORKLIST_H_
#include <cstddef>
#include <utility>
......@@ -13,8 +13,8 @@
#include "src/base/platform/mutex.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace cppgc {
namespace internal {
namespace heap {
namespace base {
// A global marking worklist that is similar the existing Worklist
// but does not reserve space and keep track of the local segments.
......@@ -405,7 +405,7 @@ bool Worklist<EntryType, SegmentSize>::Local::StealPopSegment() {
return false;
}
} // namespace internal
} // namespace cppgc
} // namespace base
} // namespace heap
#endif // V8_HEAP_CPPGC_WORKLIST_H_
#endif // V8_HEAP_BASE_WORKLIST_H_
......@@ -11,13 +11,13 @@
#include "include/cppgc/visitor.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/base/worklist.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/marking-worklists.h"
#include "src/heap/cppgc/task-handle.h"
#include "src/heap/cppgc/worklist.h"
namespace cppgc {
namespace internal {
......
......@@ -6,7 +6,7 @@
#define V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
#include "include/cppgc/visitor.h"
#include "src/heap/cppgc/worklist.h"
#include "src/heap/base/worklist.h"
namespace cppgc {
namespace internal {
......@@ -25,13 +25,14 @@ class MarkingWorklists {
// Segment size of 512 entries necessary to avoid throughput regressions.
// Since the work list is currently a temporary object this is not a problem.
using MarkingWorklist = Worklist<MarkingItem, 512 /* local entries */>;
using MarkingWorklist =
heap::base::Worklist<MarkingItem, 512 /* local entries */>;
using NotFullyConstructedWorklist =
Worklist<HeapObjectHeader*, 16 /* local entries */>;
heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
Worklist<WeakCallbackItem, 64 /* local entries */>;
heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
Worklist<HeapObjectHeader*, 64 /*local entries */>;
heap::base::Worklist<HeapObjectHeader*, 64 /*local entries */>;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
......
......@@ -12,275 +12,6 @@
namespace v8 {
namespace internal {
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Push(Segment* segment) {
  // Takes ownership of |segment| and prepends it to the global segment list
  // under the lock. |size_| is atomic so readers may observe it without lock_.
  base::MutexGuard guard(&lock_);
  segment->set_next(top_);
  set_top(segment);
  size_.fetch_add(1, std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Pop(Segment** segment) {
  // Detaches the top segment into |*segment|; ownership passes to the caller.
  // Returns false when no segments are published.
  base::MutexGuard guard(&lock_);
  if (top_ != nullptr) {
    DCHECK_LT(0U, size_);
    size_.fetch_sub(1, std::memory_order_relaxed);
    *segment = top_;
    set_top(top_->next());
    return true;
  }
  return false;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::IsEmpty() {
  // Lock-free emptiness check: |top_| is read with a relaxed atomic load;
  // writers update it via set_top() using the matching atomic store. The
  // result may be stale by the time the caller acts on it.
  return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
}
template <typename EntryType, int SegmentSize>
size_t MarkingWorklistImpl<EntryType, SegmentSize>::Size() {
  // Returns the number of published segments (not individual entries).
  // It is safe to read |size_| without a lock since this variable is
  // atomic, keeping in mind that threads may not immediately see the new
  // value when it is updated.
  return size_.load(std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Clear() {
  // Deletes every published segment and resets the size counter. Callers
  // must ensure no local views are concurrently pushing or popping.
  base::MutexGuard guard(&lock_);
  size_.store(0, std::memory_order_relaxed);
  for (Segment* segment = top_; segment != nullptr;) {
    Segment* next = segment->next();
    delete segment;
    segment = next;
  }
  set_top(nullptr);
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Update(Callback callback) {
  // Applies |callback| to every entry of every segment, dropping rejected
  // entries, and unlinks/deletes segments that become empty. Runs under the
  // lock but is not safe against concurrent local views (see class comment).
  base::MutexGuard guard(&lock_);
  Segment* prev = nullptr;
  Segment* current = top_;
  size_t num_deleted = 0;
  while (current != nullptr) {
    current->Update(callback);
    if (current->IsEmpty()) {
      DCHECK_LT(0U, size_);
      ++num_deleted;
      // Unlink |current| from the singly-linked list; |prev| == nullptr
      // means we are removing the head.
      if (prev == nullptr) {
        top_ = current->next();
      } else {
        prev->set_next(current->next());
      }
      Segment* tmp = current;
      current = current->next();
      delete tmp;
    } else {
      prev = current;
      current = current->next();
    }
  }
  size_.fetch_sub(num_deleted, std::memory_order_relaxed);
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Iterate(Callback callback) {
  // Invokes |callback| on every entry of every published segment, holding
  // the lock for the whole traversal.
  base::MutexGuard guard(&lock_);
  Segment* segment = top_;
  while (segment != nullptr) {
    segment->Iterate(callback);
    segment = segment->next();
  }
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Merge(
    MarkingWorklistImpl<EntryType, SegmentSize>* other) {
  // Moves all of |other|'s segments into this worklist. The two locks are
  // never held simultaneously, so there is no lock-ordering constraint.
  Segment* top = nullptr;
  size_t other_size = 0;
  {
    // Detach everything from |other| under its lock.
    base::MutexGuard guard(&other->lock_);
    if (!other->top_) return;
    top = other->top_;
    other_size = other->size_.load(std::memory_order_relaxed);
    other->size_.store(0, std::memory_order_relaxed);
    other->set_top(nullptr);
  }

  // It's safe to iterate through these segments because the top was
  // extracted from |other|.
  Segment* end = top;
  while (end->next()) end = end->next();

  {
    // Splice the detached chain in front of our own list under our lock.
    base::MutexGuard guard(&lock_);
    size_.fetch_add(other_size, std::memory_order_relaxed);
    end->set_next(top_);
    set_top(top);
  }
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Push(
    EntryType entry) {
  // Appends |entry|; fails (returns false) when the segment is at capacity.
  if (!IsFull()) {
    entries_[index_++] = entry;
    return true;
  }
  return false;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Pop(
    EntryType* entry) {
  // Removes the most recently pushed entry (LIFO within a segment); fails
  // (returns false) when the segment holds no entries.
  if (!IsEmpty()) {
    *entry = entries_[--index_];
    return true;
  }
  return false;
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Update(
    Callback callback) {
  // In-place compaction: |callback| receives each entry and the slot it
  // would be kept in; entries for which it returns false are dropped.
  size_t kept = 0;
  for (size_t read = 0; read < index_; read++) {
    if (callback(entries_[read], &entries_[kept])) {
      kept++;
    }
  }
  index_ = kept;
}
template <typename EntryType, int SegmentSize>
template <typename Callback>
void MarkingWorklistImpl<EntryType, SegmentSize>::Segment::Iterate(
    Callback callback) const {
  // Visits the used prefix [0, index_) without mutating the segment.
  const EntryType* end = entries_ + index_;
  for (const EntryType* entry = entries_; entry != end; ++entry) {
    callback(*entry);
  }
}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::Local(
    MarkingWorklistImpl<EntryType, SegmentSize>* worklist)
    // A freshly attached local view starts with one empty push segment and
    // one empty pop segment, both owned by this Local.
    : worklist_(worklist),
      push_segment_(NewSegment()),
      pop_segment_(NewSegment()) {}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::~Local() {
  // A local view must be drained or published before destruction. A
  // moved-from Local has null segment pointers, hence CHECK_IMPLIES rather
  // than a plain CHECK.
  CHECK_IMPLIES(push_segment_, push_segment_->IsEmpty());
  CHECK_IMPLIES(pop_segment_, pop_segment_->IsEmpty());
  delete push_segment_;
  delete pop_segment_;
}
template <typename EntryType, int SegmentSize>
MarkingWorklistImpl<EntryType, SegmentSize>::Local::Local(
    MarkingWorklistImpl<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT
    : worklist_(other.worklist_),
      push_segment_(other.push_segment_),
      pop_segment_(other.pop_segment_) {
  // Leave |other| detached (all pointers null) so its destructor has
  // nothing to check or free.
  other.worklist_ = nullptr;
  other.push_segment_ = nullptr;
  other.pop_segment_ = nullptr;
}
template <typename EntryType, int SegmentSize>
typename MarkingWorklistImpl<EntryType, SegmentSize>::Local&
MarkingWorklistImpl<EntryType, SegmentSize>::Local::operator=(
    MarkingWorklistImpl<EntryType, SegmentSize>::Local&& other) V8_NOEXCEPT {
  if (this != &other) {
    // Precondition (DCHECK-enforced): the target must be in the default or
    // moved-from state; assigning over a live view would leak its segments.
    DCHECK_NULL(worklist_);
    DCHECK_NULL(push_segment_);
    DCHECK_NULL(pop_segment_);
    worklist_ = other.worklist_;
    push_segment_ = other.push_segment_;
    pop_segment_ = other.pop_segment_;
    // Detach |other| so its destructor is a no-op.
    other.worklist_ = nullptr;
    other.push_segment_ = nullptr;
    other.pop_segment_ = nullptr;
  }
  return *this;
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Push(EntryType entry) {
  // Fast path: the local push segment has room.
  if (push_segment_->Push(entry)) return;
  // Slow path: publish the full segment; the replacement is empty, so the
  // retry cannot fail.
  PublishPushSegment();
  bool success = push_segment_->Push(entry);
  USE(success);
  DCHECK(success);
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::Pop(EntryType* entry) {
  // Refill order when the pop segment is empty: first swap in the local
  // push segment, otherwise steal a segment from the global worklist.
  if (!pop_segment_->Pop(entry)) {
    if (!push_segment_->IsEmpty()) {
      std::swap(push_segment_, pop_segment_);
    } else if (!StealPopSegment()) {
      return false;
    }
    // After a swap or a successful steal the pop segment is non-empty.
    bool success = pop_segment_->Pop(entry);
    USE(success);
    DCHECK(success);
  }
  return true;
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::IsLocalEmpty() const {
  // Empty only when neither local segment holds an entry.
  if (!push_segment_->IsEmpty()) return false;
  return pop_segment_->IsEmpty();
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::IsGlobalEmpty() const {
  // Consults only the shared worklist; unpublished segments held by local
  // views (including this one) are not reflected here.
  return worklist_->IsEmpty();
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Publish() {
  // Pushes each non-empty local segment to the global worklist, replacing
  // it with a fresh empty segment. Empty segments stay local.
  if (!push_segment_->IsEmpty()) {
    PublishPushSegment();
  }
  if (!pop_segment_->IsEmpty()) {
    PublishPopSegment();
  }
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::Merge(
    MarkingWorklistImpl<EntryType, SegmentSize>::Local* other) {
  // Publishing first moves |other|'s local entries into its global
  // worklist, which is then merged wholesale into ours.
  other->Publish();
  worklist_->Merge(other->worklist_);
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::PublishPushSegment() {
  // Ownership of the current push segment passes to the global worklist;
  // this view continues with a fresh empty segment.
  worklist_->Push(push_segment_);
  push_segment_ = NewSegment();
}
template <typename EntryType, int SegmentSize>
void MarkingWorklistImpl<EntryType, SegmentSize>::Local::PublishPopSegment() {
  // Ownership of the current pop segment passes to the global worklist;
  // this view continues with a fresh empty segment.
  worklist_->Push(pop_segment_);
  pop_segment_ = NewSegment();
}
template <typename EntryType, int SegmentSize>
bool MarkingWorklistImpl<EntryType, SegmentSize>::Local::StealPopSegment() {
  // Cheap unlocked emptiness check first; the locked Pop() below may still
  // fail if another thread races us to the last segment.
  if (worklist_->IsEmpty()) return false;
  Segment* new_segment = nullptr;
  if (worklist_->Pop(&new_segment)) {
    // The caller (Local::Pop) only invokes this with an empty pop segment,
    // so deleting it loses no entries.
    delete pop_segment_;
    pop_segment_ = new_segment;
    return true;
  }
  return false;
}
template <typename Callback>
void MarkingWorklists::Update(Callback callback) {
shared_.Update(callback);
......
......@@ -8,6 +8,7 @@
#include <unordered_map>
#include <vector>
#include "src/heap/base/worklist.h"
#include "src/heap/marking.h"
#include "src/objects/heap-object.h"
......@@ -17,120 +18,8 @@ namespace internal {
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
// A global marking worklist that is similar the existing Worklist
// but does not reserve space and keep track of the local segments.
// Eventually this will replace Worklist after all its current uses
// are migrated.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl {
 public:
  static const int kSegmentSize = SegmentSize;
  class Segment;
  class Local;

  MarkingWorklistImpl() = default;
  // A worklist must be fully drained (or Clear()ed) before destruction.
  ~MarkingWorklistImpl() { CHECK(IsEmpty()); }

  // Takes ownership of |segment| and prepends it to the global list.
  void Push(Segment* segment);
  // Detaches the top segment into |*segment|, transferring ownership to the
  // caller. Returns false when no segments are published.
  bool Pop(Segment** segment);

  // Returns true if the list of segments is empty.
  bool IsEmpty();
  // Returns the number of segments in the list.
  size_t Size();

  // Moves the segments of the given marking worklist into this
  // marking worklist.
  void Merge(MarkingWorklistImpl<EntryType, SegmentSize>* other);

  // These functions are not thread-safe. They should be called only
  // if all local marking worklists that use the current worklist have
  // been published and are empty.
  void Clear();
  template <typename Callback>
  void Update(Callback callback);
  template <typename Callback>
  void Iterate(Callback callback);

 private:
  void set_top(Segment* segment) {
    base::AsAtomicPointer::Relaxed_Store(&top_, segment);
  }

  // |lock_| guards the segment list. |top_| is additionally read/written
  // with relaxed atomics so IsEmpty() can run without taking the lock.
  base::Mutex lock_;
  Segment* top_ = nullptr;
  std::atomic<size_t> size_{0};
};
// A fixed-capacity, singly-linked segment of worklist entries. Segments are
// the unit of transfer between local views and the global worklist.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl<EntryType, SegmentSize>::Segment {
 public:
  static const size_t kSize = SegmentSize;

  Segment() = default;

  // Appends/removes an entry; both fail (return false) instead of
  // overflowing or underflowing the fixed-size buffer.
  bool Push(EntryType entry);
  bool Pop(EntryType* entry);

  size_t Size() const { return index_; }
  bool IsEmpty() const { return index_ == 0; }
  bool IsFull() const { return index_ == kSize; }
  void Clear() { index_ = 0; }

  // Compacts the segment in place, keeping only entries for which
  // |callback| returns true; |callback| may rewrite the kept entry.
  template <typename Callback>
  void Update(Callback callback);
  // Invokes |callback| on each entry without mutating the segment.
  template <typename Callback>
  void Iterate(Callback callback) const;

  Segment* next() const { return next_; }
  void set_next(Segment* segment) { next_ = segment; }

 private:
  // In-class initializers guarantee a sane state even when a segment is
  // default-initialized (e.g. `new Segment` without parentheses or a stack
  // instance), where these members would otherwise be indeterminate.
  Segment* next_ = nullptr;
  size_t index_ = 0;
  EntryType entries_[kSize];
};
// A thread-local view of the marking worklist.
template <typename EntryType, int SegmentSize>
class MarkingWorklistImpl<EntryType, SegmentSize>::Local {
 public:
  // A default-constructed Local is detached (null worklist/segments) and
  // must be move-assigned a live view before Push/Pop are used.
  Local() = default;
  explicit Local(MarkingWorklistImpl<EntryType, SegmentSize>* worklist);
  ~Local();

  // Move-only: moving transfers the segments and detaches the source.
  Local(Local&&) V8_NOEXCEPT;
  Local& operator=(Local&&) V8_NOEXCEPT;

  // Disable copying since having multiple copies of the same
  // local marking worklist is unsafe.
  Local(const Local&) = delete;
  Local& operator=(const Local& other) = delete;

  void Push(EntryType entry);
  bool Pop(EntryType* entry);

  bool IsLocalEmpty() const;
  bool IsGlobalEmpty() const;

  // Moves both non-empty local segments to the global worklist.
  void Publish();
  void Merge(MarkingWorklistImpl<EntryType, SegmentSize>::Local* other);

  size_t PushSegmentSize() const { return push_segment_->Size(); }

 private:
  void PublishPushSegment();
  void PublishPopSegment();
  bool StealPopSegment();
  Segment* NewSegment() const {
    // Bottleneck for filtering in crash dumps.
    return new Segment();
  }

  MarkingWorklistImpl<EntryType, SegmentSize>* worklist_ = nullptr;
  Segment* push_segment_ = nullptr;
  Segment* pop_segment_ = nullptr;
};
using MarkingWorklist = MarkingWorklistImpl<HeapObject, 64>;
using EmbedderTracingWorklist = MarkingWorklistImpl<HeapObject, 16>;
using MarkingWorklist = ::heap::base::Worklist<HeapObject, 64>;
using EmbedderTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
// We piggyback on marking to compute object sizes per native context that is
// needed for the new memory measurement API. The algorithm works as follows:
......
......@@ -20,6 +20,40 @@ if (is_fuchsia) {
}
}
v8_executable("v8_cppgc_shared_unittests") {
testonly = true
configs = [
"../..:external_config",
"../..:internal_config_base",
]
sources = [ "heap/base/run-all-unittests.cc" ]
deps = [
":v8_cppgc_shared_unittests_sources",
"//testing/gmock",
"//testing/gtest",
]
}
v8_source_set("v8_cppgc_shared_unittests_sources") {
testonly = true
sources = [ "heap/base/worklist-unittest.cc" ]
configs = [
"../..:external_config",
"../..:internal_config_base",
]
deps = [
"../..:v8_cppgc_shared_for_testing",
"//testing/gmock",
"//testing/gtest",
]
}
# Stand-alone target for C++ GC unittests. This is used to ensure that it
# builds without V8 as well. They are also included in the regular unittests
# target for simplicity.
......@@ -76,7 +110,6 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/tests.cc",
"heap/cppgc/tests.h",
"heap/cppgc/visitor-unittest.cc",
"heap/cppgc/worklist-unittest.cc",
"heap/cppgc/write-barrier-unittest.cc",
]
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "testing/gmock/include/gmock/gmock.h"
int main(int argc, char** argv) {
  // Don't catch SEH exceptions and continue as the following tests might hang
  // in a broken environment on Windows.
  testing::GTEST_FLAG(catch_exceptions) = false;

  // Most unit-tests are multi-threaded, so enable thread-safe death-tests.
  testing::FLAGS_gtest_death_test_style = "threadsafe";

  testing::InitGoogleMock(&argc, argv);
  return RUN_ALL_TESTS();
}
......@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/cppgc/worklist.h"
#include "src/heap/base/worklist.h"
#include "test/unittests/heap/cppgc/tests.h"
namespace cppgc {
namespace internal {
namespace heap {
namespace base {
class SomeObject {};
......@@ -323,5 +324,5 @@ TEST(CppgcWorkListTest, MergeGlobalPool) {
EXPECT_TRUE(worklist2.IsEmpty());
}
} // namespace internal
} // namespace cppgc
} // namespace base
} // namespace heap
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment