Commit 6c65e858 authored by Shu-yu Guo, committed by V8 LUCI CQ

[ptr-cage] Turn on shared pointer cage by default for arm64 and x64

Bug: v8:11460
Change-Id: I9ab419b5e90fbe677e1d63b41699d90552839e98
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2873226
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74422}
parent 4b21f15b
@@ -360,7 +360,7 @@ if (v8_enable_pointer_compression == "") {
       v8_current_cpu == "arm64" || v8_current_cpu == "x64"
 }
 if (v8_enable_pointer_compression_shared_cage == "") {
-  v8_enable_pointer_compression_shared_cage = false
+  v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
 }
 if (v8_enable_fast_torque == "") {
   v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -432,7 +432,8 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
 assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
        "Control-flow integrity is only supported on arm64")
 
-if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
+if (v8_enable_shared_ro_heap && v8_enable_pointer_compression &&
+    !v8_enable_pointer_compression_shared_cage) {
   assert(
       is_linux || is_chromeos || is_android,
       "Sharing read-only heap with pointer compression is only supported on Linux or Android")
......
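Editor's note: the two BUILD.gn hunks above make the shared cage default follow v8_enable_pointer_compression (previously it defaulted to false), and scope the Linux/ChromeOS/Android-only assert to the per-Isolate-cage configuration. A minimal sketch of how the defaults now cascade when no gn args are set explicitly; names mirror the GN variables, and this is an illustration, not build code from the commit:

#include <cassert>
#include <string>

int main() {
  const std::string v8_current_cpu = "x64";  // or "arm64"
  // v8_enable_pointer_compression defaults to true on arm64/x64...
  const bool v8_enable_pointer_compression =
      v8_current_cpu == "arm64" || v8_current_cpu == "x64";
  // ...and the shared cage now inherits that value instead of false.
  const bool v8_enable_pointer_compression_shared_cage =
      v8_enable_pointer_compression;
  assert(v8_enable_pointer_compression_shared_cage);
  return 0;
}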
@@ -384,7 +384,6 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Initialize the pointer cage base register.
-  // TODO(syg): Actually make a cage.
   __ LoadRootRelative(kPtrComprCageBaseRegister,
                       IsolateData::cage_base_offset());
 #endif
......
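Editor's note: with one process-wide cage, the cage base register initialized above holds the same value for every Isolate, so decompressing a tagged value is a single add. A hedged, illustrative sketch of that arithmetic; this is not V8's actual tagged-pointer code, which also handles Smis and tag bits:

#include <cstdint>

// The cage is 4GB-aligned; a compressed pointer stores the low 32 bits of
// the full address, and the cage base supplies the high bits.
uintptr_t Decompress(uintptr_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uintptr_t>(compressed);
}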
@@ -577,6 +577,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     ExceptionHandlerRecord* record =
         reinterpret_cast<ExceptionHandlerRecord*>(start);
     CHECK(::RtlDeleteFunctionTable(record->runtime_function));
+
+    // Unprotect reserved page.
+    DWORD old_protect;
+    CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
+                         PAGE_READWRITE, &old_protect));
   }
 #endif  // V8_OS_WIN_X64
   } else {
@@ -585,6 +590,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     if (record->dynamic_table) {
       DeleteGrowableFunctionTable(record->dynamic_table);
     }
+
+    // Unprotect reserved page.
+    DWORD old_protect;
+    CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
+                         PAGE_READWRITE, &old_protect));
   }
 }
......
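Editor's note: both unregister paths above flip the record's page back to PAGE_READWRITE, which implies the corresponding registration paths write-protect the record once it is filled in. A minimal sketch of that pairing, with hypothetical helper names and PAGE_READONLY chosen purely for illustration; only VirtualProtect and its documented signature are taken as given:

#include <windows.h>
#include <cassert>
#include <cstddef>

// Hypothetical: after a record is registered, make its page read-only so
// stray writes cannot corrupt the unwind data.
void ProtectRecord(void* start, size_t size) {
  DWORD old_protect;
  BOOL ok = VirtualProtect(start, size, PAGE_READONLY, &old_protect);
  assert(ok);
}

// What the added hunks do: restore PAGE_READWRITE before the memory is
// reused or returned to the allocator.
void UnprotectRecord(void* start, size_t size) {
  DWORD old_protect;
  BOOL ok = VirtualProtect(start, size, PAGE_READWRITE, &old_protect);
  assert(ok);
}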
@@ -3053,7 +3053,8 @@ void Isolate::Deinit() {
 
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      heap()->memory_allocator() && RequiresCodeRange()) {
+      heap()->memory_allocator() && RequiresCodeRange() &&
+      heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
@@ -3784,7 +3785,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
   }
 
 #if defined(V8_OS_WIN64)
-  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+      heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     size_t size_in_bytes = code_region.size();
......
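Editor's note: together, the Init and Deinit hunks above implement a first-in/last-out handshake. With a shared cage there is one process-wide code range, so only the Isolate that takes the use count from 0 to 1 registers the unwind info, and only the one that takes it from 1 back to 0 unregisters it; fetch_add and fetch_sub return the previous value, hence the == 0 and == 1 comparisons. A self-contained sketch of the pattern with illustrative names, not the commit's code:

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint32_t> unwind_users{0};

void IsolateInit() {
  // Previous value 0: this is the first Isolate to use the code range.
  if (unwind_users.fetch_add(1, std::memory_order_acq_rel) == 0) {
    std::puts("register unwind info once for the shared code range");
  }
}

void IsolateDeinit() {
  // Previous value 1: this is the last Isolate still using it.
  if (unwind_users.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    std::puts("unregister unwind info");
  }
}

int main() {
  IsolateInit();    // registers
  IsolateInit();    // no-op, already registered
  IsolateDeinit();  // no-op, one user remains
  IsolateDeinit();  // unregisters
  return 0;
}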
@@ -84,8 +84,21 @@ class CodeRange final : public VirtualMemoryCage {
                                 const uint8_t* embedded_blob_code,
                                 size_t embedded_blob_code_size);
 
+#ifdef V8_OS_WIN64
+  // 64-bit Windows needs to track how many Isolates are using the CodeRange
+  // for registering and unregistering of unwind info. Note that even though
+  // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count
+  // should not be used for synchronization as it's usually implemented with a
+  // relaxed read.
+  uint32_t AtomicIncrementUnwindInfoUseCount() {
+    return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
+  }
+  uint32_t AtomicDecrementUnwindInfoUseCount() {
+    return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
+  }
+#endif  // V8_OS_WIN64
+
   // Initializes the process-wide code range if RequiresProcessWideCodeRange()
   // is true.
   static void InitializeProcessWideCodeRangeOnce(
       v8::PageAllocator* page_allocator, size_t requested_size);
@@ -97,6 +110,10 @@ class CodeRange final : public VirtualMemoryCage {
   // Used when short builtin calls are enabled, where embedded builtins are
   // copied into the CodeRange so calls can be nearer.
   uint8_t* embedded_blob_code_copy_ = nullptr;
+
+#ifdef V8_OS_WIN64
+  std::atomic<uint32_t> unwindinfo_use_count_{0};
+#endif  // V8_OS_WIN64
 };
 
 }  // namespace internal
......
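Editor's note: the comment in the header hunk above is worth unpacking. std::shared_ptr::use_count is only a snapshot, typically implemented as a relaxed load, so it can neither select a unique last owner under contention nor order the last owner's teardown after the other owners' writes; the explicit acq_rel read-modify-write does both. A hedged sketch of the contrast, as an editor illustration rather than commit code:

#include <atomic>
#include <cstdint>
#include <memory>

struct Shared {
  std::atomic<uint32_t> users{0};
};

// Exact and ordered: the RMW returns the previous value atomically, so
// exactly one caller ever observes 1, and acq_rel publishes the earlier
// users' writes to that caller.
bool ReleaseAndIsLast(Shared& s) {
  return s.users.fetch_sub(1, std::memory_order_acq_rel) == 1;
}

// Racy: two threads about to drop their copies can both observe
// use_count() == 2, so neither acts as the last owner; and even a thread
// that observes 1 gets no ordering guarantee for the other owner's writes.
bool LooksLast(const std::shared_ptr<Shared>& p) {
  return p.use_count() == 1;
}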
@@ -892,6 +892,8 @@ class Heap {
   const base::AddressRegion& code_region();
 
+  CodeRange* code_range() { return code_range_.get(); }
+
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
   // ===========================================================================
......
@@ -500,9 +500,9 @@ KNOWN_OBJECTS = {
 # Lower 32 bits of first page addresses for various heap spaces.
 HEAP_FIRST_PAGES = {
-  0x08100000: "old_space",
-  0x08140000: "map_space",
-  0x08040000: "read_only_space",
+  0x080c0000: "old_space",
+  0x08100000: "map_space",
+  0x08000000: "read_only_space",
 }
 
 # List of known V8 Frame Markers.
......