Commit 34086876 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Set isolate root to the beginning of a 4Gb reservation

With the smi-corrupting decompression approach we no longer have to
sign-extend Smis, and therefore we can switch to a zero-extending
approach by moving the isolate root to the beginning of the reserved
4Gb region.

Bug: v8:9706
Change-Id: Icd6008fa87d0924519b574fdec445976f742e306
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1835548
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64144}
parent bea464ba
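
For context, here is a minimal standalone sketch of the two decompression schemes the message contrasts (illustrative only; the real helpers live in src/common/ptr-compr-inl.h, and the names below are simplified assumptions):

```cpp
#include <cstdint>

using Address  = uintptr_t;
using Tagged_t = uint32_t;  // a compressed tagged value: the low 32 bits

// Old scheme: the isolate root sits in the middle of the 4Gb cage and the
// compressed value is sign-extended, giving offsets in [-2Gb, +2Gb).
Address DecompressSignExtending(Address isolate_root, Tagged_t raw) {
  intptr_t offset = static_cast<intptr_t>(static_cast<int32_t>(raw));
  return isolate_root + static_cast<Address>(offset);
}

// New scheme (this CL): the isolate root sits at the beginning of the 4Gb
// cage and the compressed value is zero-extended, giving offsets in [0, 4Gb).
Address DecompressZeroExtending(Address isolate_root, Tagged_t raw) {
  return isolate_root + static_cast<Address>(raw);  // uint32_t zero-extends
}
```

Zero extension is attractive because ordinary 32-bit loads and moves already produce it on x64 and arm64, which is what the assembler changes below exploit.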
......@@ -329,14 +329,11 @@ class Internals {
#ifdef V8_COMPRESS_POINTERS
// See v8:7703 or src/ptr-compr.* for details about pointer compression.
static constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;
static constexpr size_t kPtrComprIsolateRootBias =
kPtrComprHeapReservationSize / 2;
static constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;
V8_INLINE static internal::Address GetRootFromOnHeapAddress(
internal::Address addr) {
return (addr + kPtrComprIsolateRootBias) &
-static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
return addr & -static_cast<intptr_t>(kPtrComprIsolateRootAlignment);
}
V8_INLINE static internal::Address DecompressTaggedAnyField(
......
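
The new GetRootFromOnHeapAddress body works because the isolate root is now both 4Gb-aligned and the base of the cage, so clearing the low 32 bits of any on-heap address recovers it. A hedged standalone illustration of that masking (hypothetical addresses):

```cpp
#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr Address kAlignment = Address{1} << 32;

// Equivalent of `addr & -static_cast<intptr_t>(kAlignment)`: clear the low
// 32 bits, i.e. round down to the enclosing 4Gb boundary.
Address RootFromOnHeapAddress(Address addr) {
  return addr & ~(kAlignment - 1);
}

int main() {
  const Address root = Address{0x7f12} << 32;      // hypothetical 4Gb-aligned root
  const Address on_heap = root + 0x00042198;       // some object inside the cage
  assert(RootFromOnHeapAddress(on_heap) == root);  // the mask recovers the root
  return 0;
}
```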
......@@ -2706,21 +2706,21 @@ void TurboAssembler::StoreTaggedField(const Register& value,
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedSigned");
Ldrsw(destination, field_operand);
Ldr(destination.W(), field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedSigned");
Sxtw(destination, source);
Mov(destination.W(), source.W());
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
Ldrsw(destination, field_operand);
Ldr(destination.W(), field_operand);
Add(destination, kRootRegister, destination);
RecordComment("]");
}
......@@ -2728,14 +2728,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
Add(destination, kRootRegister, Operand(source, SXTW));
Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldrsw(destination, field_operand);
Ldr(destination.W(), field_operand);
Add(destination, kRootRegister, destination);
RecordComment("]");
}
......@@ -2743,7 +2743,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const Register& source) {
RecordComment("[ DecompressAnyTagged");
Add(destination, kRootRegister, Operand(source, SXTW));
Add(destination, kRootRegister, Operand(source, UXTW));
RecordComment("]");
}
......
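
The arm64 changes rely on an architectural guarantee: writing a W register clears the upper 32 bits of the corresponding X register, and the UXTW operand extension zero-extends the addend, so no separate extension instruction is needed. A rough C++ model of what the emitted DecompressTaggedPointer sequence computes (assumed semantics, not the assembler itself):

```cpp
#include <cstdint>

// Models: Add(destination, kRootRegister, Operand(source, UXTW))
uint64_t DecompressTaggedPointerArm64Model(uint64_t root, uint64_t source) {
  uint32_t compressed = static_cast<uint32_t>(source);  // UXTW: keep low 32 bits
  return root + compressed;                             // zero-extended addend
}
```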
......@@ -2215,12 +2215,12 @@ TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr(
if (COMPRESS_POINTERS_BOOL) {
TNode<Int32T> compressed_base =
LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset);
// Sign extend Int32T to IntPtrT according to current compression scheme
// Zero-extend TaggedT to WordT according to current compression scheme
// so that the addition with |external_pointer| (which already contains
// compensated offset value) below will decompress the tagged value.
// See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
// details.
base_pointer = ChangeInt32ToIntPtr(compressed_base);
base_pointer = Signed(ChangeUint32ToWord(compressed_base));
} else {
base_pointer =
LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset);
......
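
The CSA comment describes the same idea through the typed array's |external_pointer| field: for on-heap typed arrays that field already holds the compensated base (see JSTypedArray::ExternalPointerCompensationForOnHeapArray), so adding the zero-extended compressed |base_pointer| decompresses it. A small hedged model of that arithmetic (names are illustrative):

```cpp
#include <cstdint>

using Address  = uintptr_t;
using Tagged_t = uint32_t;

// For an on-heap typed array, |external_pointer| is assumed to hold the
// compensation value described in the comment, so the data pointer is just
// external_pointer + ZeroExtend(compressed base_pointer).
Address TypedArrayDataPtrModel(Address external_pointer, Tagged_t compressed_base) {
  return external_pointer + static_cast<Address>(compressed_base);
}
```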
......@@ -284,21 +284,21 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedSigned");
movsxlq(destination, field_operand);
movl(destination, field_operand);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Register source) {
RecordComment("[ DecompressTaggedSigned");
movsxlq(destination, source);
movl(destination, source);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
movsxlq(destination, field_operand);
movl(destination, field_operand);
addq(destination, kRootRegister);
RecordComment("]");
}
......@@ -306,7 +306,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
movsxlq(destination, source);
movl(destination, source);
addq(destination, kRootRegister);
RecordComment("]");
}
......@@ -321,7 +321,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
movsxlq(destination, field_operand);
movl(destination, field_operand);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
......@@ -330,7 +330,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination, Register source,
Register scratch) {
DCHECK(!AreAliased(destination, scratch));
RecordComment("[ DecompressAnyTagged");
movsxlq(destination, source);
movl(destination, source);
DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
......
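
On x64 the zero extension comes for free: a 32-bit `movl` clears the upper half of the 64-bit destination register, which is why the explicit `movsxlq` sign extension can simply be dropped. A C++ analogue of the new DecompressTaggedPointer sequence (assumed instruction semantics):

```cpp
#include <cstdint>

// movl destination, field_operand   ; 32-bit load, upper 32 bits cleared
// addq destination, kRootRegister   ; add the isolate root
uint64_t DecompressTaggedPointerX64Model(uint64_t root, uint32_t field_value) {
  uint64_t destination = field_value;  // zero-extended, as movl guarantees
  return destination + root;
}
```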
......@@ -231,7 +231,7 @@ constexpr int kTaggedSizeLog2 = 2;
// These types define raw and atomic storage types for tagged values stored
// on V8 heap.
using Tagged_t = int32_t;
using Tagged_t = uint32_t;
using AtomicTagged_t = base::Atomic32;
#else
......
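
Making Tagged_t unsigned is what makes plain casts to Address zero-extend instead of sign-extend, which several of the C++ call sites below (e.g. JSTypedArray::DataPtr) now rely on. A hedged illustration of the difference:

```cpp
#include <cstdint>

using Address = uintptr_t;

int main() {
  uint32_t as_unsigned = 0x80000010;  // a compressed value in the upper 2Gb
  int32_t as_signed = static_cast<int32_t>(as_unsigned);

  Address zero_extended = static_cast<Address>(as_unsigned);  // 0x00000000'80000010
  Address sign_extended = static_cast<Address>(as_signed);    // 0xffffffff'80000010

  // With Tagged_t = uint32_t, root + compressed stays inside the 4Gb cage;
  // with the old signed type the same bit pattern would land below the root.
  return zero_extended != sign_extended ? 0 : 1;
}
```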
......@@ -29,8 +29,7 @@ V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
// signed constant instead of 64-bit constant (the problem is that 2Gb looks
// like a negative 32-bit value). It's correct because we will never use
// leftmost address of V8 heap as |on_heap_addr|.
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
kPtrComprIsolateRootBias - 1);
return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr);
}
template <>
......@@ -54,11 +53,7 @@ V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
template <typename TOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
// Current compression scheme requires |raw_value| to be sign-extended
// from int32_t to intptr_t.
intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
Address root = GetIsolateRoot(on_heap_addr);
return root + static_cast<Address>(value);
return GetIsolateRoot(on_heap_addr) + static_cast<Address>(raw_value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
......@@ -72,7 +67,6 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
STATIC_ASSERT(kPtrComprHeapReservationSize ==
Internals::kPtrComprHeapReservationSize);
STATIC_ASSERT(kPtrComprIsolateRootBias == Internals::kPtrComprIsolateRootBias);
STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
Internals::kPtrComprIsolateRootAlignment);
......
......@@ -14,7 +14,6 @@ namespace internal {
// See v8:7703 for details about how pointer compression works.
constexpr size_t kPtrComprHeapReservationSize = size_t{4} * GB;
constexpr size_t kPtrComprIsolateRootBias = kPtrComprHeapReservationSize / 2;
constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
} // namespace internal
......
......@@ -5044,12 +5044,12 @@ Node* EffectControlLinearizer::BuildTypedArrayDataPointer(Node* base,
// will be removed by the decompression elimination pass.
base = __ ChangeTaggedToCompressed(base);
base = __ BitcastTaggedToWord(base);
// Sign-extend Tagged_t to IntPtr according to current compression
// Zero-extend Tagged_t to UintPtr according to current compression
// scheme so that the addition with |external_pointer| (which already
// contains compensated offset value) will decompress the tagged value.
// See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
// details.
base = ChangeInt32ToIntPtr(base);
base = ChangeUint32ToUintPtr(base);
}
return __ UnsafePointerAdd(base, external);
}
......
......@@ -6,6 +6,7 @@
#include "src/base/bounded-page-allocator.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
namespace v8 {
......@@ -38,21 +39,39 @@ IsolateAllocator::~IsolateAllocator() {
}
#if V8_TARGET_ARCH_64_BIT
namespace {
// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
// reservation. This "IsolateRootBiasPage" page is supposed to be used for
// storing part of the Isolate object when Isolate::isolate_root_bias() is
// not zero.
inline size_t GetIsolateRootBiasPageSize(
v8::PageAllocator* platform_page_allocator) {
return RoundUp(Isolate::isolate_root_bias(),
platform_page_allocator->AllocatePageSize());
}
} // namespace
Address IsolateAllocator::InitReservation() {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
// Reserve a 4Gb region so that the middle is 4Gb aligned.
// The VirtualMemory API does not support such a constraint so we have to
// implement it manually here.
size_t reservation_size = kPtrComprHeapReservationSize;
size_t base_alignment = kPtrComprIsolateRootAlignment;
const size_t kIsolateRootBiasPageSize =
GetIsolateRootBiasPageSize(platform_page_allocator);
// Reserve a |4Gb + kIsolateRootBiasPageSize| region such that the
// reservation address plus |kIsolateRootBiasPageSize| is 4Gb aligned.
const size_t reservation_size =
kPtrComprHeapReservationSize + kIsolateRootBiasPageSize;
const size_t base_alignment = kPtrComprIsolateRootAlignment;
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
Address hint = RoundDown(reinterpret_cast<Address>(
platform_page_allocator->GetRandomMmapAddr()),
base_alignment) +
kPtrComprIsolateRootBias;
base_alignment) -
kIsolateRootBiasPageSize;
// Within this reservation there will be a sub-region with proper alignment.
VirtualMemory padded_reservation(platform_page_allocator,
......@@ -60,12 +79,11 @@ Address IsolateAllocator::InitReservation() {
reinterpret_cast<void*>(hint));
if (!padded_reservation.IsReserved()) break;
// Find such a sub-region inside the reservation that its middle is
// |base_alignment|-aligned.
// Find a properly aligned sub-region inside the reservation.
Address address =
RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize,
base_alignment) -
kPtrComprIsolateRootBias;
kIsolateRootBiasPageSize;
CHECK(padded_reservation.InVM(address, reservation_size));
#if defined(V8_OS_FUCHSIA)
......@@ -98,16 +116,16 @@ Address IsolateAllocator::InitReservation() {
if (!reservation.IsReserved()) break;
// The reservation could still be somewhere else but we can accept it
// if the reservation has the required alignment.
Address aligned_address =
RoundUp(reservation.address() + kPtrComprIsolateRootBias,
// if it has the required alignment.
Address address =
RoundUp(reservation.address() + kIsolateRootBiasPageSize,
base_alignment) -
kPtrComprIsolateRootBias;
kIsolateRootBiasPageSize;
if (reservation.address() == aligned_address) {
if (reservation.address() == address) {
reservation_ = std::move(reservation);
CHECK_EQ(reservation_.size(), reservation_size);
return aligned_address;
return address;
}
}
}
......@@ -116,13 +134,18 @@ Address IsolateAllocator::InitReservation() {
return kNullAddress;
}
void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
CHECK(reservation_.InVM(heap_address, kPtrComprHeapReservationSize));
void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
const size_t kIsolateRootBiasPageSize =
GetIsolateRootBiasPageSize(platform_page_allocator);
Address isolate_root = heap_address + kPtrComprIsolateRootBias;
Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
CHECK(IsAligned(isolate_root, kPtrComprIsolateRootAlignment));
v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
CHECK(reservation_.InVM(
heap_reservation_address,
kPtrComprHeapReservationSize + kIsolateRootBiasPageSize));
// Simplify BoundedPageAllocator's life by configuring it to use same page
// size as the Heap will use (MemoryChunk::kPageSize).
......@@ -130,7 +153,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
platform_page_allocator->AllocatePageSize());
page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
platform_page_allocator, heap_address, kPtrComprHeapReservationSize,
platform_page_allocator, isolate_root, kPtrComprHeapReservationSize,
page_size);
page_allocator_ = page_allocator_instance_.get();
......@@ -139,7 +162,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
// Inform the bounded page allocator about reserved pages.
{
Address reserved_region_address = RoundDown(isolate_address, page_size);
Address reserved_region_address = isolate_root;
size_t reserved_region_size =
RoundUp(isolate_end, page_size) - reserved_region_address;
......@@ -163,10 +186,8 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_address) {
PageAllocator::kReadWrite));
if (Heap::ShouldZapGarbage()) {
for (Address address = committed_region_address;
address < committed_region_size; address += kSystemPointerSize) {
base::Memory<Address>(address) = static_cast<Address>(kZapValue);
}
MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
kZapValue, committed_region_size / kSystemPointerSize);
}
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
......
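
The reworked reservation logic keeps an optional bias page in front of the 4Gb-aligned cage (used for part of the Isolate object when isolate_root_bias() is non-zero). A hedged sketch of the address arithmetic the allocation loop performs, with the VirtualMemory/OS parts left out:

```cpp
#include <cstdint>

using Address = uintptr_t;
constexpr size_t kPtrComprHeapReservationSize = size_t{1} << 32;   // 4Gb cage
constexpr size_t kPtrComprIsolateRootAlignment = size_t{1} << 32;

// Total size to reserve: the 4Gb cage plus the bias page in front of it.
size_t TotalReservationSize(size_t bias_page_size) {
  return kPtrComprHeapReservationSize + bias_page_size;
}

// Given a padded reservation starting at |reservation_start|, find the start
// of the sub-region such that |start + bias_page_size| (the isolate root) is
// 4Gb-aligned; this mirrors the RoundUp(...) - kIsolateRootBiasPageSize
// expression in InitReservation.
Address AlignedSubRegionStart(Address reservation_start, size_t bias_page_size) {
  Address root = (reservation_start + bias_page_size +
                  kPtrComprIsolateRootAlignment - 1) &
                 ~(kPtrComprIsolateRootAlignment - 1);  // round up to 4Gb
  return root - bias_page_size;
}
```

CommitPagesForIsolate then treats |heap_reservation_address + kIsolateRootBiasPageSize| as the isolate root and CHECKs that it is 4Gb-aligned, matching the hunk above.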
......@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
private:
Address InitReservation();
void CommitPagesForIsolate(Address heap_address);
void CommitPagesForIsolate(Address heap_reservation_address);
// The allocated memory for Isolate instance.
void* isolate_memory_ = nullptr;
......
......@@ -141,13 +141,12 @@ void JSTypedArray::RemoveExternalPointerCompensationForSerialization() {
ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
void* JSTypedArray::DataPtr() {
// Sign extend Tagged_t to intptr_t according to current compression scheme
// Zero-extend Tagged_t to Address according to current compression scheme
// so that the addition with |external_pointer| (which already contains
// compensated offset value) will decompress the tagged value.
// See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details.
return reinterpret_cast<void*>(
external_pointer() + static_cast<Address>(static_cast<intptr_t>(
static_cast<Tagged_t>(base_pointer().ptr()))));
return reinterpret_cast<void*>(external_pointer() +
static_cast<Tagged_t>(base_pointer().ptr()));
}
void JSTypedArray::SetOffHeapDataPtr(void* base, Address offset) {
......
......@@ -119,7 +119,7 @@ inline void MemsetTagged(ObjectSlot start, Object value, size_t counter) {
#ifdef V8_COMPRESS_POINTERS
Tagged_t raw_value = CompressTagged(value.ptr());
STATIC_ASSERT(kTaggedSize == kInt32Size);
MemsetInt32(start.location(), raw_value, counter);
MemsetInt32(reinterpret_cast<int32_t*>(start.location()), raw_value, counter);
#else
Address raw_value = value.ptr();
MemsetPointer(start.location(), raw_value, counter);
......
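
Because Tagged_t is now uint32_t, the tagged slot array no longer exactly matches MemsetInt32's int32_t* parameter, hence the reinterpret_cast. A small hedged model of the shape of that call (the helper here is a stand-in, not V8's actual implementation):

```cpp
#include <cstddef>
#include <cstdint>

using Tagged_t = uint32_t;

// Stand-in for the int32_t-based memset helper.
void MemsetInt32(int32_t* dest, int32_t value, size_t count) {
  for (size_t i = 0; i < count; ++i) dest[i] = value;
}

// Tagged slots are uint32_t, so the pointer is reinterpreted for the helper;
// the bit pattern written is the same either way.
void MemsetTaggedModel(Tagged_t* slots, Tagged_t raw_value, size_t count) {
  MemsetInt32(reinterpret_cast<int32_t*>(slots),
              static_cast<int32_t>(raw_value), count);
}
```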
......@@ -145,8 +145,7 @@ TEST_F(HeapWithPointerCompressionTest, HeapLayout) {
EXPECT_TRUE(IsAligned(isolate_root, size_t{4} * GB));
// Check that all memory chunks belong this region.
base::AddressRegion heap_reservation(isolate_root - size_t{2} * GB,
size_t{4} * GB);
base::AddressRegion heap_reservation(isolate_root, size_t{4} * GB);
OldGenerationMemoryChunkIterator iter(i_isolate()->heap());
for (;;) {
......