Commit 4d64208e authored by Igor Sheludko, committed by V8 LUCI CQ

[ext-code-space] Make the code space external for real

... when the v8_enable_external_code_space build flag is enabled.

Bug: v8:11880
Change-Id: I754c6229dcd25f81ef6dfbedc5885ac025c0aeff
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3164458
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77479}
parent 4fb132b8
......@@ -96,6 +96,10 @@ V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
UNREACHABLE();
}
// Stub for configurations without pointer compression (note the
// V8_COMPRESS_POINTERS #endif that follows): computing a cage base is
// meaningless here, so any call is a programming error.
V8_INLINE Address GetPtrComprCageBaseAddress(Address on_heap_addr) {
UNREACHABLE();
}
#endif // V8_COMPRESS_POINTERS
inline PtrComprCageBase GetPtrComprCageBase(HeapObject object) {
......
......@@ -13,6 +13,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/execution/vm-state-inl.h"
......@@ -1089,6 +1090,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
DCHECK_GE((stack_slots + kBitsPerByte) / kBitsPerByte,
safepoint_entry.entry_size());
int slot_offset = 0;
PtrComprCageBase cage_base(isolate());
for (uint8_t bits : safepoint_entry.iterate_bits()) {
while (bits) {
int bit = base::bits::CountTrailingZeros(bits);
......@@ -1102,11 +1104,39 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
// The spill slot may actually contain weak references so we load/store
// values using spill_slot.location() in order to avoid dealing with
// FullMaybeObjectSlots here.
Tagged_t compressed_value = static_cast<Tagged_t>(*spill_slot.location());
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
DecompressTaggedPointer(isolate(), compressed_value);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// When external code space is enabled the spill slot could contain both
// Code and non-Code references, which have different cage bases. So
// unconditional decompression of the value might corrupt Code pointers.
// However, given that
// 1) the Code pointers are never compressed by design (because
// otherwise we wouldn't know which cage base to apply for
// decompression, see respective DCHECKs in
// RelocInfo::target_object()),
// 2) there's no need to update the upper part of the full pointer
// because if it was there then it'll stay the same,
// we can avoid updating upper part of the spill slot if it already
// contains full value.
// TODO(v8:11880): Remove this special handling by enforcing builtins
// to use CodeTs instead of Code objects.
Address value = *spill_slot.location();
if (!HAS_SMI_TAG(value) && value <= 0xffffffff) {
// We don't need to update smi values or full pointers.
*spill_slot.location() =
DecompressTaggedPointer(cage_base, static_cast<Tagged_t>(value));
// Ensure that the spill slot contains correct heap object.
DCHECK(HeapObject::cast(Object(*spill_slot.location()))
.map(cage_base)
.IsMap());
}
} else {
Tagged_t compressed_value =
static_cast<Tagged_t>(*spill_slot.location());
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
DecompressTaggedPointer(cage_base, compressed_value);
}
}
#endif
v->VisitRootPointer(Root::kStackRoots, nullptr, spill_slot);
......
......@@ -32,7 +32,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/codegen/flush-instruction-cache.h"
#include "src/common/assert-scope.h"
#include "src/common/ptr-compr.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler-dispatcher/lazy-compile-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/date/date.h"
......@@ -3612,6 +3612,10 @@ class BigIntPlatform : public bigint::Platform {
};
} // namespace
// Test-only accessor for the cage backing the code space: the dedicated code
// range when the external code space is enabled, otherwise the regular
// pointer compression cage.
VirtualMemoryCage* Isolate::GetPtrComprCodeCageForTesting() {
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    return heap_.code_range();
  }
  return GetPtrComprCage();
}
bool Isolate::Init(SnapshotData* startup_snapshot_data,
SnapshotData* read_only_snapshot_data,
SnapshotData* shared_heap_snapshot_data, bool can_rehash) {
......@@ -3727,6 +3731,13 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
GetShortBuiltinsCallRegion().contains(heap_.code_region());
}
}
#if V8_EXTERNAL_CODE_SPACE
if (heap_.code_range()) {
code_cage_base_ = GetPtrComprCageBaseAddress(heap_.code_range()->base());
} else {
code_cage_base_ = cage_base();
}
#endif // V8_EXTERNAL_CODE_SPACE
isolate_data_.external_reference_table()->Init(this);
......
......@@ -1081,7 +1081,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return isolate_data()->cage_base();
}
Address code_cage_base() const { return cage_base(); }
// When pointer compression and external code space are on, this is the base
// address of the cage where the code space is allocated. Otherwise, it
// defaults to cage_base().
Address code_cage_base() const {
#if V8_EXTERNAL_CODE_SPACE
// Cached base of the code-space cage; initialized in Isolate::Init from the
// heap's code range (or cage_base() when no code range exists).
return code_cage_base_;
#else
// Without an external code space, code objects live in the main cage.
return cage_base();
#endif // V8_EXTERNAL_CODE_SPACE
}
// When pointer compression is on, the PtrComprCage used by this
// Isolate. Otherwise nullptr.
......@@ -1091,6 +1100,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Read-only view of the isolate's pointer compression cage; simply delegates
// to the isolate allocator, which owns the cage.
const VirtualMemoryCage* GetPtrComprCage() const {
return isolate_allocator_->GetPtrComprCage();
}
VirtualMemoryCage* GetPtrComprCodeCageForTesting();
// Generated code can embed this address to get access to the isolate-specific
// data (for example, roots, external references, builtins, etc.).
......@@ -2116,6 +2126,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// favor memory over runtime performance.
bool memory_savings_mode_active_ = false;
#if V8_EXTERNAL_CODE_SPACE
// Base address of the pointer compression cage containing external code
// space, when external code space is enabled.
Address code_cage_base_ = 0;
#endif
// Time stamp at initialization.
double time_millis_at_init_ = 0;
......
......@@ -4,10 +4,12 @@
#include "src/heap/code-range.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"
#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
......@@ -85,6 +87,9 @@ CodeRange::~CodeRange() { Free(); }
bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
size_t requested) {
DCHECK_NE(requested, 0);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
page_allocator = GetPlatformPageAllocator();
}
if (requested <= kMinimumCodeRangeSize) {
requested = kMinimumCodeRangeSize;
......@@ -107,7 +112,9 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
// is enabled so that InitReservation would not break the alignment in
// GetAddressHint().
params.base_alignment =
VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
V8_EXTERNAL_CODE_SPACE_BOOL
? base::bits::RoundUpToPowerOfTwo(requested)
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = reserved_area;
params.page_size = MemoryChunk::kPageSize;
params.requested_start_hint = GetCodeRangeAddressHint()->GetAddressHint(
......@@ -115,6 +122,16 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
if (!VirtualMemoryCage::InitReservation(params)) return false;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// Ensure that the code range does not cross the 4Gb boundary and thus
// default compression scheme of truncating the Code pointers to 32-bit
// still work.
Address base = page_allocator_->begin();
Address last = base + page_allocator_->size() - 1;
CHECK_EQ(GetPtrComprCageBaseAddress(base),
GetPtrComprCageBaseAddress(last));
}
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space. See
// https://cs.chromium.org/chromium/src/components/crash/content/
......
......@@ -85,14 +85,63 @@ V8_INLINE constexpr bool IsFreeSpaceOrFiller(InstanceType instance_type) {
} // namespace InstanceTypeChecker
// INSTANCE_TYPE_CHECKERS macro defines some "types" that do not have
// respective C++ classes (see TypedArrayConstructor, FixedArrayExact) or
// the respective C++ counterpart is actually a template (see HashTable).
// So in order to be able to customize IsType() implementations for specific
// types, we declare a parallel set of "types" that can be compared using
// std::is_same<>.
namespace InstanceTypeTraits {
#define DECL_TYPE(type, ...) class type;
INSTANCE_TYPE_CHECKERS(DECL_TYPE)
TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(DECL_TYPE)
TORQUE_INSTANCE_CHECKERS_MULTIPLE_ONLY_DECLARED(DECL_TYPE)
HEAP_OBJECT_TYPE_LIST(DECL_TYPE)
#undef DECL_TYPE
} // namespace InstanceTypeTraits
// Expands to the two HeapObject::Is##type() predicates: the parameterless
// overload derives a cage base from the object itself and forwards to the
// cage-base-taking overload, which additionally special-cases objects located
// in the external code space (details in the /* */ notes inside the macro).
#define TYPE_CHECKER(type, ...) \
bool HeapObject::Is##type() const { \
PtrComprCageBase cage_base = GetPtrComprCageBase(*this); \
return HeapObject::Is##type(cage_base); \
} \
/* The cage_base passed here is supposed to be the base of the pointer */ \
/* compression cage where the Map space is allocated. */ \
/* However when external code space is enabled it's not always the case */ \
/* yet and the predicate has to work if the cage_base corresponds to the */ \
/* cage containing external code space. */ \
/* TODO(v8:11880): Ensure that the cage_base value always corresponds to */ \
/* the main pointer compression cage. */ \
bool HeapObject::Is##type(PtrComprCageBase cage_base) const { \
if (V8_EXTERNAL_CODE_SPACE_BOOL) { \
if (IsCodeObject(*this)) { \
/* Code space contains only Code objects and free space fillers. */ \
if (std::is_same<InstanceTypeTraits::type, \
InstanceTypeTraits::Code>::value || \
std::is_same<InstanceTypeTraits::type, \
InstanceTypeTraits::FreeSpace>::value || \
std::is_same<InstanceTypeTraits::type, \
InstanceTypeTraits::FreeSpaceOrFiller>::value) { \
/* Code space objects are never read-only, so it's safe to query */ \
/* heap value in order to compute proper cage base. */ \
Heap* heap = GetHeapFromWritableObject(*this); \
Map map_object = map(Isolate::FromHeap(heap)); \
return InstanceTypeChecker::Is##type(map_object.instance_type()); \
} \
/* For all the other queries we can return false. */ \
return false; \
} \
/* Fallback to checking map instance type. */ \
} \
Map map_object = map(cage_base); \
return InstanceTypeChecker::Is##type(map_object.instance_type()); \
}
// TODO(v8:7786): For instance types that have a single map instance on the
// roots, and when that map is a embedded in the binary, compare against the map
// pointer rather than looking up the instance type.
#define TYPE_CHECKER(type, ...) \
DEF_GETTER(HeapObject, Is##type, bool) { \
return InstanceTypeChecker::Is##type(map(cage_base).instance_type()); \
}
INSTANCE_TYPE_CHECKERS(TYPE_CHECKER)
#undef TYPE_CHECKER
......
......@@ -5221,7 +5221,7 @@ void ImplementationVisitor::GenerateClassVerifiers(
}
// Second, verify that this object is what it claims to be.
cc_contents << " CHECK(o.Is" << name << "());\n";
cc_contents << " CHECK(o.Is" << name << "(isolate));\n";
// Third, verify its properties.
for (auto f : type->fields()) {
......
......@@ -45,7 +45,7 @@ UNINITIALIZED_TEST(PtrComprCageCodeRange) {
v8::Isolate* isolate = v8::Isolate::New(create_params);
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
VirtualMemoryCage* cage = i_isolate->GetPtrComprCage();
VirtualMemoryCage* cage = i_isolate->GetPtrComprCodeCageForTesting();
if (i_isolate->RequiresCodeRange()) {
CHECK(!i_isolate->heap()->code_region().is_empty());
CHECK(cage->reservation()->InVM(i_isolate->heap()->code_region().begin(),
......
......@@ -143,6 +143,9 @@ TEST_F(HeapTest, HeapLayout) {
Address cage_base = i_isolate()->cage_base();
EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
Address code_cage_base = i_isolate()->code_cage_base();
EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
Address isolate_root = i_isolate()->isolate_root();
EXPECT_EQ(cage_base, isolate_root);
......@@ -150,6 +153,7 @@ TEST_F(HeapTest, HeapLayout) {
// Check that all memory chunks belong this region.
base::AddressRegion heap_reservation(cage_base, size_t{4} * GB);
base::AddressRegion code_reservation(code_cage_base, size_t{4} * GB);
SafepointScope scope(i_isolate()->heap());
OldGenerationMemoryChunkIterator iter(i_isolate()->heap());
......@@ -159,7 +163,13 @@ TEST_F(HeapTest, HeapLayout) {
Address address = chunk->address();
size_t size = chunk->area_end() - address;
EXPECT_TRUE(heap_reservation.contains(address, size));
AllocationSpace owner_id = chunk->owner_identity();
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
(owner_id == CODE_SPACE || owner_id == CODE_LO_SPACE)) {
EXPECT_TRUE(code_reservation.contains(address, size));
} else {
EXPECT_TRUE(heap_reservation.contains(address, size));
}
}
}
#endif // V8_COMPRESS_POINTERS
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.