Commit aca0c208 authored by Shu-yu Guo, committed by V8 LUCI CQ

Reland "[heap] Tie process-wide CodeRange lifetime to any remaining Heaps"

This is a reland of 1532f8ff

Changes since revert:
 - Fix race in initialization

Original change's description:
> [heap] Tie process-wide CodeRange lifetime to any remaining Heaps
>
> Currently the process-wide CodeRange, once created, lives until process
> shutdown. This CL changes it to be alive as long as there is a Heap,
> when the last Heap is gone it gets destroyed and will be recreated the
> next time a Heap is created. This behavior is shared with
> SingleCopyReadOnlyArtifacts.
>
> Bug: v8:11929
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2989103
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75522}

Bug: v8:11929
Change-Id: If250d8901044bcba1f7d7f797b398c29cc2c5a61
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3003910
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75616}
parent dfa9fbc5
...@@ -14,8 +14,15 @@ namespace internal { ...@@ -14,8 +14,15 @@ namespace internal {
namespace { namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<CodeRange>, // Mutex for creating process_wide_code_range_.
GetProcessWideCodeRangeCage) base::LazyMutex process_wide_code_range_creation_mutex_ =
LAZY_MUTEX_INITIALIZER;
// Weak pointer holding the process-wide CodeRange, if one has been created. All
// Heaps hold a std::shared_ptr to this, so this is destroyed when no Heaps
// remain.
base::LazyInstance<std::weak_ptr<CodeRange>>::type process_wide_code_range_ =
LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint) DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)
...@@ -153,19 +160,24 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate, ...@@ -153,19 +160,24 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
} }
// static // static
void CodeRange::InitializeProcessWideCodeRangeOnce( std::shared_ptr<CodeRange> CodeRange::EnsureProcessWideCodeRange(
v8::PageAllocator* page_allocator, size_t requested_size) { v8::PageAllocator* page_allocator, size_t requested_size) {
*GetProcessWideCodeRangeCage() = std::make_shared<CodeRange>(); base::MutexGuard guard(process_wide_code_range_creation_mutex_.Pointer());
if (!GetProcessWideCodeRange()->InitReservation(page_allocator, std::shared_ptr<CodeRange> code_range = process_wide_code_range_.Get().lock();
requested_size)) { if (!code_range) {
code_range = std::make_shared<CodeRange>();
if (!code_range->InitReservation(page_allocator, requested_size)) {
V8::FatalProcessOutOfMemory( V8::FatalProcessOutOfMemory(
nullptr, "Failed to reserve virtual memory for CodeRange"); nullptr, "Failed to reserve virtual memory for CodeRange");
} }
*process_wide_code_range_.Pointer() = code_range;
}
return code_range;
} }
// static // static
std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() { std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
return *GetProcessWideCodeRangeCage(); return process_wide_code_range_.Get().lock();
} }
} // namespace internal } // namespace internal
......
...@@ -120,7 +120,7 @@ class CodeRange final : public VirtualMemoryCage { ...@@ -120,7 +120,7 @@ class CodeRange final : public VirtualMemoryCage {
const uint8_t* embedded_blob_code, const uint8_t* embedded_blob_code,
size_t embedded_blob_code_size); size_t embedded_blob_code_size);
static void InitializeProcessWideCodeRangeOnce( static std::shared_ptr<CodeRange> EnsureProcessWideCodeRange(
v8::PageAllocator* page_allocator, size_t requested_size); v8::PageAllocator* page_allocator, size_t requested_size);
// If InitializeProcessWideCodeRangeOnce has been called, returns the // If InitializeProcessWideCodeRangeOnce has been called, returns the
......
...@@ -5429,10 +5429,6 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath( ...@@ -5429,10 +5429,6 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
FatalProcessOutOfMemory("CALL_AND_RETRY_LAST"); FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
} }
namespace {
V8_DECLARE_ONCE(initialize_shared_code_range_once);
} // namespace
void Heap::SetUp() { void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout(); allocation_timeout_ = NextAllocationTimeout();
...@@ -5465,10 +5461,8 @@ void Heap::SetUp() { ...@@ -5465,10 +5461,8 @@ void Heap::SetUp() {
// When sharing a pointer cage among Isolates, also share the // When sharing a pointer cage among Isolates, also share the
// CodeRange. isolate_->page_allocator() is the process-wide pointer // CodeRange. isolate_->page_allocator() is the process-wide pointer
// compression cage's PageAllocator. // compression cage's PageAllocator.
base::CallOnce(&initialize_shared_code_range_once, code_range_ = CodeRange::EnsureProcessWideCodeRange(
&CodeRange::InitializeProcessWideCodeRangeOnce,
isolate_->page_allocator(), requested_size); isolate_->page_allocator(), requested_size);
code_range_ = CodeRange::GetProcessWideCodeRange();
} else { } else {
code_range_ = std::make_shared<CodeRange>(); code_range_ = std::make_shared<CodeRange>();
if (!code_range_->InitReservation(isolate_->page_allocator(), if (!code_range_->InitReservation(isolate_->page_allocator(),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment