Commit 19b6e5f4 authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: Introduce pointer compression based on thread-local base

With the caged heap enabled, we can halve Member<> by storing only the
least significant half of the pointer. The base of the heap is kept in a
thread-local variable. As a consequence, only a single heap is allowed
per thread.
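Conceptually, the scheme works as follows (a minimal standalone sketch
mirroring the Compress/Decompress logic introduced in the diff below;
names are illustrative, not the actual API):

  // Sketch: the cage is aligned to its 4GiB reservation, so the upper half
  // of every in-cage pointer equals the per-thread base and can be dropped.
  #include <cstdint>

  thread_local uintptr_t g_cage_base = 0;  // set when the cage is reserved

  uint32_t Compress(const void* ptr) {
    // The low 32 bits identify the object within the cage.
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
  }

  void* Decompress(uint32_t value) {
    // nullptr (0) and the sentinel (1) must round-trip unchanged.
    if (value <= 1) return reinterpret_cast<void*>(uintptr_t{value});
    return reinterpret_cast<void*>(g_cage_base | uintptr_t{value});
  }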

The feature is gated by the new GN arg:
  cppgc_enable_pointer_compression.

Bug: chromium:1325007

Change-Id: Ic7f1ecb7b9ded57caad63d95bbc8e8ad6ad65031
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2739979
Reviewed-by: Almothana Athamneh <almuthanna@chromium.org>
Commit-Queue: Almothana Athamneh <almuthanna@chromium.org>
Commit-Queue: Tamer Tas <tmrts@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Tamer Tas <tmrts@chromium.org>
Auto-Submit: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80518}
parent 5d48c41f
......@@ -68,6 +68,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# cppgc_enable_caged_heap
# cppgc_enable_check_assignments_in_prefinalizers
# cppgc_enable_object_names
# cppgc_enable_pointer_compression
# cppgc_enable_verify_heap
# cppgc_enable_young_generation
# v8_enable_zone_compression
......@@ -3001,6 +3002,7 @@ filegroup(
"src/heap/cppgc/marking-visitor.h",
"src/heap/cppgc/marking-worklists.cc",
"src/heap/cppgc/marking-worklists.h",
"src/heap/cppgc/member.cc",
"src/heap/cppgc/memory.cc",
"src/heap/cppgc/memory.h",
"src/heap/cppgc/metric-recorder.h",
......
......@@ -573,6 +573,9 @@ assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
"Young generation in CppGC requires caged heap")
assert(!cppgc_enable_pointer_compression || cppgc_enable_caged_heap,
"Pointer compression in CppGC requires caged heap")
if (v8_enable_single_generation == true) {
assert(
v8_enable_unconditional_write_barriers || v8_disable_write_barriers,
......@@ -790,6 +793,7 @@ external_cppgc_defines = [
"CPPGC_SUPPORTS_OBJECT_NAMES",
"CPPGC_CAGED_HEAP",
"CPPGC_YOUNG_GENERATION",
"CPPGC_POINTER_COMPRESSION",
]
enabled_external_cppgc_defines = []
......@@ -806,6 +810,9 @@ if (cppgc_enable_caged_heap) {
if (cppgc_enable_young_generation) {
enabled_external_cppgc_defines += [ "CPPGC_YOUNG_GENERATION" ]
}
if (cppgc_enable_pointer_compression) {
enabled_external_cppgc_defines += [ "CPPGC_POINTER_COMPRESSION" ]
}
disabled_external_cppgc_defines =
external_cppgc_defines - enabled_external_cppgc_defines
......@@ -5718,6 +5725,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/marking-visitor.h",
"src/heap/cppgc/marking-worklists.cc",
"src/heap/cppgc/marking-worklists.h",
"src/heap/cppgc/member.cc",
"src/heap/cppgc/memory.cc",
"src/heap/cppgc/memory.h",
"src/heap/cppgc/metric-recorder.h",
......
......@@ -88,6 +88,9 @@ declare_args() {
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
# Enable pointer compression in cppgc.
cppgc_enable_pointer_compression = false
# Enable advanced BigInt algorithms, costing about 10-30 KB binary size
# depending on platform. Disabled on Android to save binary size.
v8_advanced_bigint_algorithms = !is_android
......
......@@ -9,6 +9,7 @@
#include <cstddef>
#include <type_traits>
#include "cppgc/internal/api-constants.h"
#include "cppgc/internal/pointer-policies.h"
#include "cppgc/sentinel-pointer.h"
#include "cppgc/type-traits.h"
......@@ -20,36 +21,138 @@ class Visitor;
namespace internal {
#if defined(CPPGC_POINTER_COMPRESSION)
class CageBaseGlobal final {
public:
V8_INLINE static void Update(uintptr_t base) {
CPPGC_DCHECK(0u ==
(base & (api_constants::kCagedHeapReservationAlignment - 1)));
g_base_ = base;
}
V8_INLINE static uintptr_t Get() { return g_base_; }
private:
static thread_local V8_EXPORT uintptr_t g_base_
__attribute__((require_constant_initialization));
CageBaseGlobal() = delete;
};
class CompressedPointer final {
public:
using Storage = uint32_t;
V8_INLINE CompressedPointer() : value_(0u) {}
V8_INLINE explicit CompressedPointer(const void* ptr)
: value_(Compress(ptr)) {}
V8_INLINE const void* Load() const { return Decompress(value_); }
V8_INLINE const void* LoadAtomic() const {
return Decompress(
reinterpret_cast<const std::atomic<Storage>&>(value_).load(
std::memory_order_relaxed));
}
V8_INLINE Storage LoadRaw() const { return value_; }
V8_INLINE void Store(const void* ptr) { value_ = Compress(ptr); }
V8_INLINE void StoreAtomic(const void* value) {
reinterpret_cast<std::atomic<Storage>&>(value_).store(
Compress(value), std::memory_order_relaxed);
}
V8_INLINE void StoreRaw(Storage value) { value_ = value; }
static V8_INLINE Storage Compress(const void* ptr) {
static constexpr size_t kGigaCageMask =
~(api_constants::kCagedHeapReservationAlignment - 1);
const uintptr_t base = CageBaseGlobal::Get();
CPPGC_DCHECK(base);
CPPGC_DCHECK(!ptr || ptr == kSentinelPointer ||
base == (reinterpret_cast<uintptr_t>(ptr) & kGigaCageMask));
return static_cast<Storage>(reinterpret_cast<uintptr_t>(ptr));
}
static V8_INLINE void* Decompress(Storage ptr) {
const uintptr_t base = CageBaseGlobal::Get();
CPPGC_DCHECK(base);
// We have to preserve nullptr and kSentinelPointer; Members can't point to
// GigaCage metadata, so values <= 1 decompress to themselves.
if (V8_UNLIKELY(ptr <= 1)) return reinterpret_cast<void*>(ptr);
return reinterpret_cast<void*>(base | static_cast<uintptr_t>(ptr));
}
private:
// All constructors initialize `value_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
Storage value_;
};
#endif // defined(CPPGC_POINTER_COMPRESSION)
class RawPointer final {
public:
using Storage = uintptr_t;
RawPointer() : value_(0u) {}
explicit RawPointer(const void* ptr)
: value_(reinterpret_cast<uintptr_t>(ptr)) {}
V8_INLINE const void* Load() const {
return reinterpret_cast<const void*>(value_);
}
V8_INLINE const void* LoadAtomic() const {
return reinterpret_cast<const std::atomic<const void*>&>(value_).load(
std::memory_order_relaxed);
}
V8_INLINE Storage LoadRaw() const { return value_; }
V8_INLINE void Store(const void* ptr) {
value_ = reinterpret_cast<uintptr_t>(ptr);
}
V8_INLINE void StoreAtomic(const void* ptr) {
reinterpret_cast<std::atomic<uintptr_t>&>(value_).store(
reinterpret_cast<uintptr_t>(ptr), std::memory_order_relaxed);
}
V8_INLINE void StoreRaw(Storage value) { value_ = value; }
private:
// All constructors initialize `value_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
uintptr_t value_;
};
// MemberBase always refers to the object as const object and defers to
// BasicMember on casting to the right type as needed.
class MemberBase {
protected:
struct AtomicInitializerTag {};
MemberBase() : raw_(nullptr) {}
MemberBase() = default;
explicit MemberBase(const void* value) : raw_(value) {}
MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); }
const void** GetRawSlot() const { return &raw_; }
const void* GetRaw() const { return raw_; }
void SetRaw(void* value) { raw_ = value; }
const void* GetRawAtomic() const {
return reinterpret_cast<const std::atomic<const void*>*>(&raw_)->load(
std::memory_order_relaxed);
}
void SetRawAtomic(const void* value) {
reinterpret_cast<std::atomic<const void*>*>(&raw_)->store(
value, std::memory_order_relaxed);
const void** GetRawSlot() const {
return reinterpret_cast<const void**>(const_cast<MemberBase*>(this));
}
const void* GetRaw() const { return raw_.Load(); }
void SetRaw(void* value) { raw_.Store(value); }
const void* GetRawAtomic() const { return raw_.LoadAtomic(); }
void SetRawAtomic(const void* value) { raw_.StoreAtomic(value); }
void ClearFromGC() const { raw_ = nullptr; }
void ClearFromGC() const { raw_.StoreRaw(0u); }
private:
// All constructors initialize `raw_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
mutable const void* raw_;
#if defined(CPPGC_POINTER_COMPRESSION)
using Storage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using Storage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
mutable Storage raw_;
};
// The basic class from which all Member classes are 'generated'.
......
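The net effect of the member.h changes: with compression enabled, MemberBase
stores a CompressedPointer (4 bytes) instead of a RawPointer (pointer-sized).
A hypothetical illustration (the Node type is made up; sizes assume a 64-bit
target and the default, stateless checking policies):

  #include "cppgc/garbage-collected.h"
  #include "cppgc/member.h"
  #include "cppgc/visitor.h"

  struct Node : cppgc::GarbageCollected<Node> {
    void Trace(cppgc::Visitor* v) const {
      v->Trace(left);
      v->Trace(right);
    }
    cppgc::Member<Node> left;   // 4 bytes compressed, 8 uncompressed
    cppgc::Member<Node> right;  // pointer-heavy objects shrink accordingly
  };

  #if defined(CPPGC_POINTER_COMPRESSION)
  static_assert(sizeof(cppgc::Member<Node>) == sizeof(uint32_t),
                "compressed storage");
  #else
  static_assert(sizeof(cppgc::Member<Node>) == sizeof(void*),
                "raw pointer storage");
  #endif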
......@@ -8,13 +8,13 @@
#error "Must be compiled with caged heap enabled"
#endif
#include "src/heap/cppgc/caged-heap.h"
#include "include/cppgc/internal/caged-heap-local-data.h"
#include "include/cppgc/member.h"
#include "include/cppgc/platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/heap/cppgc/caged-heap.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
......@@ -52,6 +52,12 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
: reserved_area_(ReserveCagedHeap(platform_allocator)) {
using CagedAddress = CagedHeap::AllocatorType::Address;
#if defined(CPPGC_POINTER_COMPRESSION)
// With pointer compression, only a single heap per thread is allowed.
CHECK(!CageBaseGlobal::Get());
CageBaseGlobal::Update(reinterpret_cast<uintptr_t>(reserved_area_.address()));
#endif // defined(CPPGC_POINTER_COMPRESSION)
const bool is_not_oom = platform_allocator.SetPermissions(
reserved_area_.address(),
RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
......@@ -77,6 +83,14 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
v8::base::PageFreeingMode::kMakeInaccessible);
}
CagedHeap::~CagedHeap() {
#if defined(CPPGC_POINTER_COMPRESSION)
CHECK_EQ(reinterpret_cast<uintptr_t>(reserved_area_.address()),
CageBaseGlobal::Get());
CageBaseGlobal::Update(0u);
#endif // defined(CPPGC_POINTER_COMPRESSION)
}
#if defined(CPPGC_YOUNG_GENERATION)
void CagedHeap::EnableGenerationalGC() {
local_data().is_young_generation_enabled = true;
......
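These CHECKs encode the restriction from the commit message: the thread-local
base can belong to only one cage at a time. A hypothetical sketch of code that
now traps (assuming a cppgc::Platform instance is available):

  #include <memory>
  #include "cppgc/heap.h"
  #include "cppgc/platform.h"

  void TwoHeapsOneThread(std::shared_ptr<cppgc::Platform> platform) {
    auto heap1 = cppgc::Heap::Create(platform);
    // With CPPGC_POINTER_COMPRESSION, this second construction dies in
    // CHECK(!CageBaseGlobal::Get()) inside the CagedHeap constructor.
    auto heap2 = cppgc::Heap::Create(platform);
  }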
......@@ -38,6 +38,7 @@ class CagedHeap final {
}
CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
~CagedHeap();
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
......
......@@ -53,6 +53,30 @@ bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
->IsEmpty();
}
#if defined(CPPGC_POINTER_COMPRESSION)
namespace {
// The concurrent marking task can run from a thread where no cage-base is set.
// Moreover, it can run from a thread which has another heap attached. Make sure
// to set/reset the base. This also works for the main thread joining the
// marking.
class PointerCompressionCageScope final {
public:
explicit PointerCompressionCageScope(HeapBase& heap)
: prev_cage_base_(CageBaseGlobal::Get()) {
CageBaseGlobal::Update(
reinterpret_cast<uintptr_t>(heap.caged_heap().base()));
}
~PointerCompressionCageScope() { CageBaseGlobal::Update(prev_cage_base_); }
private:
const uintptr_t prev_cage_base_;
};
} // namespace
#endif // defined(CPPGC_POINTER_COMPRESSION)
class ConcurrentMarkingTask final : public v8::JobTask {
public:
explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);
......@@ -75,6 +99,9 @@ void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
StatsCollector::EnabledConcurrentScope stats_scope(
concurrent_marker_.heap().stats_collector(),
StatsCollector::kConcurrentMark);
#if defined(CPPGC_POINTER_COMPRESSION)
PointerCompressionCageScope cage_base_resetter(concurrent_marker_.heap());
#endif // defined(CPPGC_POINTER_COMPRESSION)
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
......
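Because the scope saves and restores the previous base, it behaves correctly
both on workers with no base set and on the main thread that already owns one.
A hypothetical use from any marking step (PointerCompressionCageScope lives in
the anonymous namespace above; shown here for illustration only):

  void RunMarkingStep(cppgc::internal::HeapBase& heap) {
  #if defined(CPPGC_POINTER_COMPRESSION)
    // Member decompression inside this scope resolves against heap's cage.
    PointerCompressionCageScope scope(heap);
  #endif  // defined(CPPGC_POINTER_COMPRESSION)
    // ... drain marking worklists, dereferencing compressed Members ...
  }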
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "include/cppgc/member.h"
namespace cppgc {
namespace internal {
#if defined(CPPGC_POINTER_COMPRESSION)
thread_local uintptr_t CageBaseGlobal::g_base_ = 0u;
#endif // defined(CPPGC_POINTER_COMPRESSION)
} // namespace internal
} // namespace cppgc
......@@ -6,6 +6,7 @@
#include <algorithm>
#include "include/cppgc/member.h"
#include "include/cppgc/visitor.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
......@@ -34,7 +35,14 @@ void VisitRememberedSlots(const std::set<void*>& slots, const HeapBase& heap,
// or by reintroducing nested allocation scopes that avoid finalization.
DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
#if defined(CPPGC_POINTER_COMPRESSION)
// The slot stores a compressed 32-bit payload; decompress it before use.
void* value =
CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
#else // !defined(CPPGC_POINTER_COMPRESSION)
void* value = *reinterpret_cast<void**>(slot);
#endif // !defined(CPPGC_POINTER_COMPRESSION)
// Slot could be updated to nullptr or kSentinelPointer by the mutator.
if (value == kSentinelPointer || value == nullptr) continue;
......
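The remembered set stores raw slot addresses, so the reader must know the slot
width: under compression a Member slot holds a 32-bit payload rather than a
full pointer. A minimal standalone sketch of the dual read path (assuming the
CompressedPointer helper from member.h, as used in the hunk above):

  #include <cstdint>
  #include "cppgc/member.h"

  void* ReadMemberSlot(void* slot) {
  #if defined(CPPGC_POINTER_COMPRESSION)
    // Widen the 32-bit payload through the thread-local cage base.
    return cppgc::internal::CompressedPointer::Decompress(
        *reinterpret_cast<uint32_t*>(slot));
  #else
    return *reinterpret_cast<void**>(slot);
  #endif
  }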
......@@ -524,6 +524,9 @@ class LinkedNode final : public GarbageCollected<LinkedNode> {
} // namespace
// The following tests create multiple heaps per thread, which is not supported
// with pointer compression enabled.
#if !defined(CPPGC_POINTER_COMPRESSION)
TEST_F(MemberHeapDeathTest, CheckForOffHeapMemberCrashesOnReassignment) {
std::vector<Member<LinkedNode>> off_heap_member;
// Verification state is constructed on first assignment.
......@@ -561,6 +564,7 @@ TEST_F(MemberHeapDeathTest, CheckForOnHeapMemberCrashesOnInitialAssignment) {
"");
}
}
#endif // !defined(CPPGC_POINTER_COMPRESSION)
#endif // V8_ENABLE_CHECKS
......