Commit 727e808f authored by Anton Bikineev's avatar Anton Bikineev Committed by V8 LUCI CQ

cppgc: Enable 2GB cage to speed up compression/decompression

With only a 2GB reservation we can make sure that the heap is allocated
in such a way that all the pointers into it have the most significant
bit of the low halfword set. This allows us to quickly distinguish
between normal pointers and nullptr/sentinel when performing
sign extension inside decompression.

Bug: chromium:1325007
Change-Id: Ie3a653796bb9dc875ec50103e05cb9aaf55515cf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3793614
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82310}
parent 61637d82
......@@ -41,7 +41,7 @@ constexpr size_t kGuardPageSize = 4096;
static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
#if defined(CPPGC_CAGED_HEAP)
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
constexpr size_t kCagedHeapNormalPageReservationSize =
......
......@@ -72,7 +72,7 @@ class V8_EXPORT AgeTable final {
__builtin_ctz(static_cast<uint32_t>(kCardSizeInBytes));
#else //! V8_HAS_BUILTIN_CTZ
// Hardcode and check with assert.
12;
11;
#endif // !V8_HAS_BUILTIN_CTZ
static_assert((1 << kGranularityBits) == kCardSizeInBytes);
const size_t entry = offset >> kGranularityBits;
......
......@@ -32,7 +32,7 @@ class V8_EXPORT CagedHeapBase {
}
V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) {
static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT;
static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT - 1;
static_assert((static_cast<size_t>(1) << kHalfWordShift) ==
api_constants::kCagedHeapReservationSize);
CPPGC_DCHECK(g_heap_base_);
......
......@@ -124,12 +124,12 @@ class CompressedPointer final {
(base & kGigaCageMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kGigaCageMask));
const auto uptr = reinterpret_cast<uintptr_t>(ptr);
// Shift the pointer by one and truncate.
auto compressed = static_cast<IntegralType>(uptr >> 1);
// Truncate the pointer.
auto compressed =
static_cast<IntegralType>(reinterpret_cast<uintptr_t>(ptr));
// Normal compressed pointers must have the MSB set.
CPPGC_DCHECK((!compressed || compressed == kCompressedSentinel) ||
(compressed & 0x80000000));
(compressed & (1 << 31)));
return compressed;
}
......@@ -137,15 +137,14 @@ class CompressedPointer final {
CPPGC_DCHECK(CageBaseGlobal::IsSet());
const uintptr_t base = CageBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will
// sign-extend it. Then, shift the result by one. It's important to shift
// the unsigned value, as otherwise it would result in undefined behavior.
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr)) << 1;
// sign-extend it.
const uint64_t mask = static_cast<uint64_t>(static_cast<int32_t>(ptr));
return reinterpret_cast<void*>(mask & base);
}
private:
static constexpr IntegralType kCompressedSentinel =
SentinelPointer::kSentinelValue >> 1;
SentinelPointer::kSentinelValue;
// All constructors initialize `value_`. Do not add a default value here as it
// results in a non-atomic write on some builds, even when the atomic version
// of the constructor is used.
......
......@@ -49,12 +49,11 @@ VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
#if defined(CPPGC_POINTER_COMPRESSION)
// If pointer compression is enabled, reserve 2x of cage size and leave the
// half that has the least significant bit of the most significant halfword
// set. This is needed for compression to make sure that compressed normal
// pointers have the most significant bit set to 1, so that on decompression
// the bit will be sign-extended. This saves us a branch and 'or' operation
// during compression.
// If pointer compression is enabled, reserve 2x of cage size and leave only
// the upper half. This is needed to make sure that compressed pointers have
// the most significant bit set to 1, so that on decompression the bit will
// be sign-extended. This saves us a branch and 'or' operation during
// compression.
// TODO(chromium:1325007): Provide API in PageAllocator to left trim
// allocations and return the half of the reservation back to the OS.
static constexpr size_t kTryReserveSize = 2 * kCagedHeapReservationSize;
......
......@@ -73,7 +73,7 @@ constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(2) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
// TODO(v8:12231): To reduce OOM probability, instead of the fixed-size
// reservation consider to use a moving needle implementation or simply
......
......@@ -108,18 +108,6 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pointer) >>
(sizeof(uint32_t) * CHAR_BIT))));
try_trace(decompressed_high);
// In addition, check half-compressed halfwords, since the compiler is free to
// spill intermediate results of compression/decompression onto the stack.
const uintptr_t base = CagedHeapBase::GetBase();
DCHECK(base);
auto intermediate_decompressed_low = reinterpret_cast<Address>(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pointer)) | base);
try_trace(intermediate_decompressed_low);
auto intermediate_decompressed_high = reinterpret_cast<Address>(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pointer) >>
(sizeof(uint32_t) * CHAR_BIT)) |
base);
try_trace(intermediate_decompressed_high);
#endif // defined(CPPGC_POINTER_COMPRESSION)
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment