Commit 8b9d0138 authored by Maya Lekova, committed by Commit Bot

Revert "cppgc: Rework GC info creation"

This reverts commit d76064df.

Reason for revert: Breaking MSAN - https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux%20-%20arm64%20-%20sim%20-%20MSAN/37390/overview

Original change's description:
> cppgc: Rework GC info creation
>
> Previously, GCInfoTrait relied on the non-trivial constructor of a
> static object for registering a new GCInfo object. The generated code
> is required to be thread-safe which is achieved by introducing guard
> variables in the compiler.
>
> The new version is similar to Blink in that it relies on zero
> initialization of a trivially constructible atomic.
>
> Compared to guard variables that are created per GCInfo registration,
> the atomic creates less bloat (~20bytes/type) and also results in a
> better fast path.
>
> Minimum example: https://godbolt.org/z/qrdTf8
>
> Bug: chromium:1056170
> Change-Id: I95efbbf035b655d0440c9477f5391e310e2b71fa
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2764750
> Reviewed-by: Omer Katz <omerkatz@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73463}

Bug: chromium:1056170
Change-Id: I71960103513d6db7789d752b70727d014c2e6406
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2767020
Auto-Submit: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#73466}
parent 6d5e538d
......@@ -5,8 +5,7 @@
#ifndef INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#define INCLUDE_CPPGC_INTERNAL_GC_INFO_H_
#include <atomic>
#include <cstdint>
#include <stdint.h>
#include "cppgc/internal/finalizer-trait.h"
#include "cppgc/internal/name-trait.h"
......@@ -18,11 +17,16 @@ namespace internal {
using GCInfoIndex = uint16_t;
// Acquires a new GC info object and returns the index. In addition, also
// updates `registered_index` atomically.
V8_EXPORT GCInfoIndex
EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
FinalizationCallback, TraceCallback, NameCallback, bool);
class V8_EXPORT RegisteredGCInfoIndex final {
public:
RegisteredGCInfoIndex(FinalizationCallback finalization_callback,
TraceCallback trace_callback,
NameCallback name_callback, bool has_v_table);
GCInfoIndex GetIndex() const { return index_; }
private:
const GCInfoIndex index_;
};
// Fold types based on finalizer behavior. Note that finalizer characteristics
// align with trace behavior, i.e., destructors are virtual when trace methods
......@@ -55,17 +59,13 @@ struct GCInfoFolding {
// Trait determines how the garbage collector treats objects wrt. to traversing,
// finalization, and naming.
template <typename T>
struct GCInfoTrait final {
struct GCInfoTrait {
static GCInfoIndex Index() {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
: EnsureGCInfoIndex(
registered_index, FinalizerTrait<T>::kCallback,
TraceTrait<T>::Trace, NameTrait<T>::GetName,
std::is_polymorphic<T>::value);
static const RegisteredGCInfoIndex registered_index(
FinalizerTrait<T>::kCallback, TraceTrait<T>::Trace,
NameTrait<T>::GetName, std::is_polymorphic<T>::value);
return registered_index.GetIndex();
}
};
......
......@@ -125,19 +125,11 @@ void GCInfoTable::CheckMemoryIsZeroed(uintptr_t* base, size_t len) {
#endif // DEBUG
}
GCInfoIndex GCInfoTable::RegisterNewGCInfo(
std::atomic<GCInfoIndex>& registered_index, const GCInfo& info) {
GCInfoIndex GCInfoTable::RegisterNewGCInfo(const GCInfo& info) {
// Ensuring a new index involves current index adjustment as well as
// potentially resizing the table. For simplicity we use a lock.
v8::base::MutexGuard guard(&table_mutex_);
// Check the registered index again after taking the lock as some other
// thread may have registered the info at the same time.
GCInfoIndex index = registered_index.load(std::memory_order_relaxed);
if (index) {
return index;
}
if (current_index_ == limit_) {
Resize();
}
......@@ -145,7 +137,6 @@ GCInfoIndex GCInfoTable::RegisterNewGCInfo(
GCInfoIndex new_index = current_index_++;
CHECK_LT(new_index, GCInfoTable::kMaxIndex);
table_[new_index] = info;
registered_index.store(new_index, std::memory_order_release);
return new_index;
}
......
......@@ -54,7 +54,7 @@ class V8_EXPORT GCInfoTable final {
GCInfoTable(const GCInfoTable&) = delete;
GCInfoTable& operator=(const GCInfoTable&) = delete;
GCInfoIndex RegisterNewGCInfo(std::atomic<uint16_t>&, const GCInfo& info);
GCInfoIndex RegisterNewGCInfo(const GCInfo& info);
const GCInfo& GCInfoFromIndex(GCInfoIndex index) const {
DCHECK_GE(index, kMinIndex);
......
......@@ -9,14 +9,12 @@
namespace cppgc {
namespace internal {
GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
FinalizationCallback finalization_callback,
TraceCallback trace_callback,
NameCallback name_callback, bool has_v_table) {
return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
{finalization_callback, trace_callback, name_callback, has_v_table});
}
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
FinalizationCallback finalization_callback, TraceCallback trace_callback,
NameCallback name_callback, bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
{finalization_callback, trace_callback, name_callback,
has_v_table})) {}
} // namespace internal
} // namespace cppgc
......@@ -20,72 +20,56 @@ namespace {
constexpr GCInfo GetEmptyGCInfo() { return {nullptr, nullptr, nullptr, false}; }
class GCInfoTableTest : public ::testing::Test {
public:
GCInfoIndex RegisterNewGCInfoForTesting(const GCInfo& info) {
// Unused registered index will result in registering a new index.
std::atomic<GCInfoIndex> registered_index{0};
return table().RegisterNewGCInfo(registered_index, info);
}
void SetUp() override {
table_ = std::make_unique<GCInfoTable>(&page_allocator_);
}
void TearDown() override { table_.reset(); }
GCInfoTable& table() { return *table_; }
const GCInfoTable& table() const { return *table_; }
private:
v8::base::PageAllocator page_allocator_;
std::unique_ptr<GCInfoTable> table_;
};
using GCInfoTableDeathTest = GCInfoTableTest;
} // namespace
TEST_F(GCInfoTableTest, InitialEmpty) {
EXPECT_EQ(GCInfoTable::kMinIndex, table().NumberOfGCInfos());
TEST(GCInfoTableTest, InitialEmpty) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
EXPECT_EQ(GCInfoTable::kMinIndex, table.NumberOfGCInfos());
}
TEST_F(GCInfoTableTest, ResizeToMaxIndex) {
TEST(GCInfoTableTest, ResizeToMaxIndex) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
GCInfoIndex index = RegisterNewGCInfoForTesting(info);
GCInfoIndex index = table.RegisterNewGCInfo(info);
EXPECT_EQ(i, index);
}
}
TEST_F(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
TEST(GCInfoTableDeathTest, MoreThanMaxIndexInfos) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
// Create GCInfoTable::kMaxIndex entries.
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < GCInfoTable::kMaxIndex;
i++) {
RegisterNewGCInfoForTesting(info);
table.RegisterNewGCInfo(info);
}
EXPECT_DEATH_IF_SUPPORTED(RegisterNewGCInfoForTesting(info), "");
EXPECT_DEATH_IF_SUPPORTED(table.RegisterNewGCInfo(info), "");
}
TEST_F(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
TEST(GCInfoTableDeathTest, OldTableAreaIsReadOnly) {
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
// Use up all slots until limit.
GCInfoIndex limit = table().LimitForTesting();
GCInfoIndex limit = table.LimitForTesting();
// Bail out if initial limit is already the maximum because of large committed
// pages. In this case, nothing can be committed as read-only.
if (limit == GCInfoTable::kMaxIndex) {
return;
}
for (GCInfoIndex i = GCInfoTable::kMinIndex; i < limit; i++) {
RegisterNewGCInfoForTesting(info);
table.RegisterNewGCInfo(info);
}
EXPECT_EQ(limit, table().LimitForTesting());
RegisterNewGCInfoForTesting(info);
EXPECT_NE(limit, table().LimitForTesting());
EXPECT_EQ(limit, table.LimitForTesting());
table.RegisterNewGCInfo(info);
EXPECT_NE(limit, table.LimitForTesting());
// Old area is now read-only.
auto& first_slot = table().TableSlotForTesting(GCInfoTable::kMinIndex);
auto& first_slot = table.TableSlotForTesting(GCInfoTable::kMinIndex);
EXPECT_DEATH_IF_SUPPORTED(first_slot.finalize = nullptr, "");
}
......@@ -101,9 +85,8 @@ class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
void Run() final {
GCInfo info = GetEmptyGCInfo();
std::atomic<GCInfoIndex> registered_index;
for (GCInfoIndex i = 0; i < num_registrations_; i++) {
table_->RegisterNewGCInfo(registered_index, info);
table_->RegisterNewGCInfo(info);
}
}
......@@ -114,7 +97,7 @@ class ThreadRegisteringGCInfoObjects final : public v8::base::Thread {
} // namespace
TEST_F(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
TEST(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
constexpr size_t num_threads = 4;
constexpr size_t main_thread_initialized = 2;
constexpr size_t gc_infos_to_register =
......@@ -124,15 +107,17 @@ TEST_F(GCInfoTableTest, MultiThreadedResizeToMaxIndex) {
"must sum up to kMaxIndex");
constexpr size_t gc_infos_per_thread = gc_infos_to_register / num_threads;
v8::base::PageAllocator page_allocator;
GCInfoTable table(&page_allocator);
GCInfo info = GetEmptyGCInfo();
for (size_t i = 0; i < main_thread_initialized; i++) {
RegisterNewGCInfoForTesting(info);
table.RegisterNewGCInfo(info);
}
v8::base::Thread* threads[num_threads];
for (size_t i = 0; i < num_threads; i++) {
threads[i] =
new ThreadRegisteringGCInfoObjects(&table(), gc_infos_per_thread);
new ThreadRegisteringGCInfoObjects(&table, gc_infos_per_thread);
}
for (size_t i = 0; i < num_threads; i++) {
CHECK(threads[i]->Start());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment