Commit baa4ba3f authored by Shu-yu Guo, committed by V8 LUCI CQ

Reland^4 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"

This is a reland of 8b74fd45

Changes since revert:

- Reverted a61aa491, which did not fix the
  jitless toggling issue on Chromium Win64

- Fixed jitless toggling on Win64 by checking FLAG_jitless in
  EmbeddedDataWithMaybeRemappedEmbeddedBuiltins

Original change's description:
> Reland^3 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"
>
> This is a reland of 054ff044
>
> Change since revert:
>
> - Remove assignment to FLAG_enable_short_builtins in test since
>   it's write-once in CFI.
>
> Original change's description:
> > Reland^2 "[ptr-cage] Turn on shared pointer cage by default for arm64 and x64"
> >
> > This is a reland of 1f504c36
> >
> > Changes since revert:
> >
> > - Removed disabling of RO heap sharing when --stress-snapshot is passed;
> >   was fixed by f4a6c628
> > - Fixed crashing tests that caused revert separately in
> >   a61aa491
> >
> > Original change's description:
> > > > [ptr-cage] Turn on shared pointer cage by default for arm64 and x64
> > > >
> > > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2873226
> > > > Reviewed-by: Igor Sheludko <ishell@chromium.org>
> > > > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> > > > Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> > > > Cr-Commit-Position: refs/heads/master@{#74422}
> > >
> > > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2878855
> > > Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> > > Reviewed-by: Adam Klein <adamk@chromium.org>
> > > Reviewed-by: Igor Sheludko <ishell@chromium.org>
> > > Reviewed-by: Dan Elphick <delphick@chromium.org>
> > > Cr-Commit-Position: refs/heads/master@{#74448}
> >
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2891460
> > Reviewed-by: Adam Klein <adamk@chromium.org>
> > Commit-Queue: Shu-yu Guo <syg@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#74546}
>
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2893567
> Reviewed-by: Shu-yu Guo <syg@chromium.org>
> Reviewed-by: Adam Klein <adamk@chromium.org>
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74548}

TBR=ishell@chromium.org

Bug: v8:11460
Change-Id: Ied925de5f886a906b1ca178365aee73155e679cb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2891697
Reviewed-by: Shu-yu Guo <syg@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74560}
parent 8613ac24
@@ -352,7 +352,7 @@ if (v8_enable_pointer_compression == "") {
       v8_current_cpu == "arm64" || v8_current_cpu == "x64"
 }
 if (v8_enable_pointer_compression_shared_cage == "") {
-  v8_enable_pointer_compression_shared_cage = false
+  v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
 }
 if (v8_enable_fast_torque == "") {
   v8_enable_fast_torque = v8_enable_fast_mksnapshot
...@@ -425,7 +425,8 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, ...@@ -425,7 +425,8 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64") "Control-flow integrity is only supported on arm64")
if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) { if (v8_enable_shared_ro_heap && v8_enable_pointer_compression &&
!v8_enable_pointer_compression_shared_cage) {
assert( assert(
is_linux || is_chromeos || is_android, is_linux || is_chromeos || is_android,
"Sharing read-only heap with pointer compression is only supported on Linux or Android") "Sharing read-only heap with pointer compression is only supported on Linux or Android")
@@ -447,6 +448,10 @@ assert(
     !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
     "Can't share a pointer compression cage if pointers aren't compressed")
+assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
+           v8_current_cpu == "arm64",
+       "Sharing a pointer compression cage is only supported on x64 and arm64")
+
 assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
        "Write barriers can't be both enabled and disabled")
......
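The shared-cage default and the new platform assert above are build-time guards. As a rough cross-check (an assumption about the flag-to-define mapping, not something this CL adds: GN's v8_enable_pointer_compression and v8_enable_pointer_compression_shared_cage presumably become the V8_COMPRESS_POINTERS and V8_COMPRESS_POINTERS_IN_SHARED_CAGE defines seen in the builtins hunk below), the same invariant could be restated in C++ as:

    // Hypothetical compile-time restatement of the GN assert; V8 enforces
    // this in GN, not (necessarily) in the preprocessor.
    #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
        !defined(V8_COMPRESS_POINTERS)
    #error "Can't share a pointer compression cage if pointers aren't compressed"
    #endif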
@@ -384,7 +384,6 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Initialize the pointer cage base register.
-  // TODO(syg): Actually make a cage.
   __ LoadRootRelative(kPtrComprCageBaseRegister,
                       IsolateData::cage_base_offset());
 #endif
......
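The register loaded above (kPtrComprCageBaseRegister) is what makes a shared cage viable: every Isolate in the process decompresses 32-bit tagged values against the same base. A minimal sketch of the arithmetic, with illustrative names rather than V8's actual decompression helpers:

    #include <cstdint>

    using Address = uintptr_t;   // full (64-bit) address
    using Tagged_t = uint32_t;   // compressed 32-bit tagged value

    // Decompression is a single base-plus-offset add against the cage base
    // that kPtrComprCageBaseRegister holds at runtime.
    inline Address Decompress(Address cage_base, Tagged_t tagged) {
      return cage_base + static_cast<Address>(tagged);
    }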
@@ -577,6 +577,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     ExceptionHandlerRecord* record =
         reinterpret_cast<ExceptionHandlerRecord*>(start);
     CHECK(::RtlDeleteFunctionTable(record->runtime_function));
+
+    // Unprotect reserved page.
+    DWORD old_protect;
+    CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
+                         PAGE_READWRITE, &old_protect));
   }
 #endif  // V8_OS_WIN_X64
   } else {
@@ -585,6 +590,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
     if (record->dynamic_table) {
       DeleteGrowableFunctionTable(record->dynamic_table);
     }
+
+    // Unprotect reserved page.
+    DWORD old_protect;
+    CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
+                         PAGE_READWRITE, &old_protect));
   }
 }
......
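Both unregister paths above now restore PAGE_READWRITE on the page holding the unwind record before the CodeRange memory is released. The registration side presumably write-protects that page after filling in the record (an assumption here; only the unregister side is shown in this diff). A minimal sketch of the pairing, using the real Win32 VirtualProtect API but illustrative function names:

    #include <windows.h>
    #include <cassert>

    // Registration (sketch): drop write access once the record is in place.
    void ProtectRecordPage(void* start, size_t record_size) {
      DWORD old_protect;
      BOOL ok = VirtualProtect(start, record_size, PAGE_READONLY, &old_protect);
      assert(ok);
    }

    // Unregistration (mirrors the diff): make the page writable again so it
    // can be safely reused or decommitted.
    void UnprotectRecordPage(void* start, size_t record_size) {
      DWORD old_protect;
      BOOL ok = VirtualProtect(start, record_size, PAGE_READWRITE, &old_protect);
      assert(ok);
    }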
@@ -3101,7 +3101,8 @@ void Isolate::Deinit() {
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      heap()->memory_allocator() && RequiresCodeRange()) {
+      heap()->memory_allocator() && RequiresCodeRange() &&
+      heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
@@ -3832,7 +3833,8 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
   }
 #if defined(V8_OS_WIN64)
-  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+  if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+      heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
     const base::AddressRegion& code_region = heap()->code_region();
     void* start = reinterpret_cast<void*>(code_region.begin());
     size_t size_in_bytes = code_region.size();
......
@@ -88,6 +88,21 @@ class CodeRange final : public VirtualMemoryCage {
     return embedded_blob_code_copy_.load(std::memory_order_acquire);
   }
 
+#ifdef V8_OS_WIN64
+  // 64-bit Windows needs to track how many Isolates are using the CodeRange
+  // for registering and unregistering of unwind info. Note that even though
+  // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count
+  // should not be used for synchronization as it's usually implemented with
+  // a relaxed read.
+  uint32_t AtomicIncrementUnwindInfoUseCount() {
+    return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
+  }
+
+  uint32_t AtomicDecrementUnwindInfoUseCount() {
+    return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
+  }
+#endif  // V8_OS_WIN64
+
   bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
 
   void Free();
@@ -105,14 +120,12 @@ class CodeRange final : public VirtualMemoryCage {
                                    const uint8_t* embedded_blob_code,
                                    size_t embedded_blob_code_size);
 
-  // Initializes the process-wide code range if RequiresProcessWideCodeRange()
-  // is true.
   static void InitializeProcessWideCodeRangeOnce(
       v8::PageAllocator* page_allocator, size_t requested_size);
 
   // If InitializeProcessWideCodeRangeOnce has been called, returns the
   // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
-  static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
+  V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
 
  private:
   // Used when short builtin calls are enabled, where embedded builtins are
@@ -122,6 +135,10 @@ class CodeRange final : public VirtualMemoryCage {
   // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins
   // may race during Isolate::Init.
   base::Mutex remap_embedded_builtins_mutex_;
+
+#ifdef V8_OS_WIN64
+  std::atomic<uint32_t> unwindinfo_use_count_{0};
+#endif
 };
 
 }  // namespace internal
......
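Together with the Isolate::Init/Deinit hunks above, this counter implements a first-in/last-out protocol: fetch_add and fetch_sub return the value held before the update, so a return of 0 on increment marks the first Isolate attaching to the shared CodeRange (register the unwind info) and a return of 1 on decrement marks the last one detaching (unregister it). A self-contained sketch of the pattern with illustrative names:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> unwindinfo_use_count{0};

    void OnIsolateInit() {
      // Pre-increment value 0 => this is the first user: register once.
      if (unwindinfo_use_count.fetch_add(1, std::memory_order_acq_rel) == 0) {
        // win64_unwindinfo::RegisterNonABICompliantCodeRange(...) goes here.
      }
    }

    void OnIsolateDeinit() {
      // Pre-decrement value 1 => this is the last user: unregister once.
      if (unwindinfo_use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
        // win64_unwindinfo::UnregisterNonABICompliantCodeRange(...) goes here.
      }
    }

This is also why the header comment warns against std::shared_ptr::use_count: it is typically a relaxed read with no ordering guarantees, so it cannot safely decide a "first in" or "last out" race.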
@@ -894,6 +894,8 @@ class Heap {
   const base::AddressRegion& code_region();
 
+  CodeRange* code_range() { return code_range_.get(); }
+
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
   // ===========================================================================
......
@@ -159,6 +159,7 @@ inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(Code code) {
   // shared CodeRange. When short builtin calls are enabled, there is a single
   // copy of the re-embedded builtins in the shared CodeRange, so use that if
   // it's present.
+  if (FLAG_jitless) return EmbeddedData::FromBlob();
   CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
   return (code_range && code_range->embedded_blob_code_copy() != nullptr)
              ? EmbeddedData::FromBlob(code_range)
......
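This one-line guard is the Win64 jitless fix called out at the top of the commit message: with --jitless there is no remapped builtins copy to consult, so the lookup must short-circuit to the default embedded blob. A self-contained model of the decision (stand-in names, not V8's API; the fallback branch after the '?' arm is truncated by the diff and assumed here):

    #include <cassert>

    enum class BlobSource { kDefault, kRemappedCopy };

    BlobSource SelectEmbeddedBlob(bool jitless, bool has_remapped_copy) {
      // The added FLAG_jitless check: never consult the CodeRange copy.
      if (jitless) return BlobSource::kDefault;
      return has_remapped_copy ? BlobSource::kRemappedCopy
                               : BlobSource::kDefault;
    }

    int main() {
      // Even if a remapped copy exists, jitless mode must take the default
      // blob; toggling this flag on Win64 was the crash being fixed.
      assert(SelectEmbeddedBlob(true, true) == BlobSource::kDefault);
      assert(SelectEmbeddedBlob(false, true) == BlobSource::kRemappedCopy);
      return 0;
    }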
@@ -500,9 +500,9 @@ KNOWN_OBJECTS = {
 # Lower 32 bits of first page addresses for various heap spaces.
 HEAP_FIRST_PAGES = {
-  0x08100000: "old_space",
-  0x08140000: "map_space",
-  0x08040000: "read_only_space",
+  0x080c0000: "old_space",
+  0x08100000: "map_space",
+  0x08000000: "read_only_space",
 }
 
 # List of known V8 Frame Markers.
......