Commit 57b777ff authored by Shu-yu Guo, committed by V8 LUCI CQ

[heap] Don't get the ForegroundTaskRunner in shared isolate

This CL removes dependence on the ForegroundTaskRunner in the shared
isolate because the shared isolate doesn't have one associated with it.

This touches three places:

1. The memory reducer is no longer allocated for the shared isolate.

2. The WasmEngine is no longer initialized for the shared isolate, which
   never executes user code.

3. The ability to post non-nestable tasks is no longer required of the
   shared isolate's platform. Non-nestable tasks are only needed for
   FinalizationRegistry, and the shared isolate never executes user code
   (see the sketch below).
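
For reference, a minimal sketch of the embedder-side contract that the
Isolate::Initialize() check below enforces for regular (non-shared)
isolates. The class name and the Enqueue() helper are hypothetical; only
the v8::TaskRunner overrides are real API from include/v8-platform.h:

  // Sketch only: a foreground task runner whose NonNestableTasksEnabled()
  // returns true, as the check in Isolate::Initialize requires for
  // non-shared isolates. The shared isolate is exempt after this CL.
  #include "v8-platform.h"

  #include <memory>

  class ExampleForegroundTaskRunner : public v8::TaskRunner {
   public:
    void PostTask(std::unique_ptr<v8::Task> task) override {
      Enqueue(std::move(task), /*nestable=*/true);
    }
    void PostNonNestableTask(std::unique_ptr<v8::Task> task) override {
      // Non-nestable tasks run only from the outermost run-loop turn;
      // FinalizationRegistry cleanup tasks rely on this guarantee.
      Enqueue(std::move(task), /*nestable=*/false);
    }
    void PostDelayedTask(std::unique_ptr<v8::Task> task,
                         double delay_in_seconds) override {
      // Sketch: a real embedder would schedule this on its event loop.
    }
    void PostIdleTask(std::unique_ptr<v8::IdleTask> task) override {}
    bool IdleTasksEnabled() override { return false; }
    bool NonNestableTasksEnabled() const override { return true; }

   private:
    void Enqueue(std::unique_ptr<v8::Task> task, bool nestable) {
      // Hypothetical hand-off to the embedder's event loop.
    }
  };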

Bug: v8:11708
Change-Id: I4b0cf2c8dc8686ccc7b7d24e6c9e12eb4b9d03d6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3864275
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82876}
parent 247b33e9
@@ -8690,9 +8690,9 @@ void Isolate::Initialize(Isolate* v8_isolate,
   i_isolate->set_embedder_wrapper_object_index(
       params.embedder_wrapper_object_index);
-  if (!i::V8::GetCurrentPlatform()
-           ->GetForegroundTaskRunner(v8_isolate)
-           ->NonNestableTasksEnabled()) {
+  if (!i_isolate->is_shared() && !i::V8::GetCurrentPlatform()
+                                      ->GetForegroundTaskRunner(v8_isolate)
+                                      ->NonNestableTasksEnabled()) {
     FATAL(
         "The current platform's foreground task runner does not have "
         "non-nestable tasks enabled. The embedder must provide one.");
@@ -3563,7 +3563,7 @@ void Isolate::Deinit() {
   debug()->Unload();
 #if V8_ENABLE_WEBASSEMBLY
-  wasm::GetWasmEngine()->DeleteCompileJobsOnIsolate(this);
+  if (!is_shared()) wasm::GetWasmEngine()->DeleteCompileJobsOnIsolate(this);
   BackingStore::RemoveSharedWasmMemoryObjects(this);
 #endif  // V8_ENABLE_WEBASSEMBLY
@@ -3669,7 +3669,7 @@ void Isolate::Deinit() {
 #endif  // defined(V8_OS_WIN)
 #if V8_ENABLE_WEBASSEMBLY
-  wasm::GetWasmEngine()->RemoveIsolate(this);
+  if (!is_shared()) wasm::GetWasmEngine()->RemoveIsolate(this);
 #endif  // V8_ENABLE_WEBASSEMBLY
   TearDownEmbeddedBlob();
@@ -4274,7 +4274,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
 #endif  // V8_COMPRESS_POINTERS
 #if V8_ENABLE_WEBASSEMBLY
-  wasm::GetWasmEngine()->AddIsolate(this);
+  if (!is_shared()) wasm::GetWasmEngine()->AddIsolate(this);
 #endif  // V8_ENABLE_WEBASSEMBLY
 #if defined(V8_OS_WIN) && defined(V8_ENABLE_ETW_STACK_WALKING)
@@ -1909,17 +1909,19 @@ bool Heap::CollectGarbage(AllocationSpace space,
   // in-between.
   size_t used_memory_after = OldGenerationSizeOfObjects();
   size_t committed_memory_after = CommittedOldGenerationMemory();
-  MemoryReducer::Event event;
-  event.type = MemoryReducer::kMarkCompact;
-  event.time_ms = MonotonicallyIncreasingTimeInMs();
-  // Trigger one more GC if
-  // - this GC decreased committed memory,
-  // - there is high fragmentation,
-  event.next_gc_likely_to_collect_more =
-      (committed_memory_before > committed_memory_after + MB) ||
-      HasHighFragmentation(used_memory_after, committed_memory_after);
-  event.committed_memory = committed_memory_after;
-  memory_reducer_->NotifyMarkCompact(event);
+  if (memory_reducer_ != nullptr) {
+    MemoryReducer::Event event;
+    event.type = MemoryReducer::kMarkCompact;
+    event.time_ms = MonotonicallyIncreasingTimeInMs();
+    // Trigger one more GC if
+    // - this GC decreased committed memory,
+    // - there is high fragmentation,
+    event.next_gc_likely_to_collect_more =
+        (committed_memory_before > committed_memory_after + MB) ||
+        HasHighFragmentation(used_memory_after, committed_memory_after);
+    event.committed_memory = committed_memory_after;
+    memory_reducer_->NotifyMarkCompact(event);
+  }
   if (initial_max_old_generation_size_ < max_old_generation_size() &&
       used_memory_after < initial_max_old_generation_size_threshold_) {
     set_max_old_generation_size(initial_max_old_generation_size_);
@@ -1997,10 +1999,12 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
     tracer()->ResetSurvivalEvents();
     old_generation_size_configured_ = false;
     set_old_generation_allocation_limit(initial_old_generation_size_);
-    MemoryReducer::Event event;
-    event.type = MemoryReducer::kPossibleGarbage;
-    event.time_ms = MonotonicallyIncreasingTimeInMs();
-    memory_reducer_->NotifyPossibleGarbage(event);
+    if (memory_reducer_ != nullptr) {
+      MemoryReducer::Event event;
+      event.type = MemoryReducer::kPossibleGarbage;
+      event.time_ms = MonotonicallyIncreasingTimeInMs();
+      memory_reducer_->NotifyPossibleGarbage(event);
+    }
   }
   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
   if (!isolate()->context().is_null()) {
@@ -2072,10 +2076,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
     case IncrementalMarkingLimit::kFallbackForEmbedderLimit:
       // This is a fallback case where no appropriate limits have been
       // configured yet.
-      MemoryReducer::Event event;
-      event.type = MemoryReducer::kPossibleGarbage;
-      event.time_ms = MonotonicallyIncreasingTimeInMs();
-      memory_reducer()->NotifyPossibleGarbage(event);
+      if (memory_reducer() != nullptr) {
+        MemoryReducer::Event event;
+        event.type = MemoryReducer::kPossibleGarbage;
+        event.time_ms = MonotonicallyIncreasingTimeInMs();
+        memory_reducer()->NotifyPossibleGarbage(event);
+      }
       break;
     case IncrementalMarkingLimit::kNoLimit:
       break;
@@ -3780,6 +3786,7 @@ void Heap::ActivateMemoryReducerIfNeeded() {
   // - there was no mark compact since the start.
   // - the committed memory can be potentially reduced.
   // 2 pages for the old, code, and map space + 1 page for new space.
+  if (memory_reducer_ == nullptr) return;
   const int kMinCommittedMemory = 7 * Page::kPageSize;
   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
       isolate()->IsIsolateInBackground()) {
@@ -5114,7 +5121,7 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
     return Heap::HeapGrowingMode::kConservative;
   }
-  if (memory_reducer()->ShouldGrowHeapSlowly()) {
+  if (memory_reducer() != nullptr && memory_reducer()->ShouldGrowHeapSlowly()) {
     return Heap::HeapGrowingMode::kSlow;
   }
@@ -5454,7 +5461,7 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
   gc_idle_time_handler_.reset(new GCIdleTimeHandler());
   stack_ = std::make_unique<::heap::base::Stack>();
   memory_measurement_.reset(new MemoryMeasurement(isolate()));
-  memory_reducer_.reset(new MemoryReducer(this));
+  if (!IsShared()) memory_reducer_.reset(new MemoryReducer(this));
   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
     live_object_stats_.reset(new ObjectStats(this));
     dead_object_stats_.reset(new ObjectStats(this));
@@ -5602,7 +5609,8 @@ void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
     isolate()->AddCodeMemoryChunk(chunk);
   }
   const size_t kMemoryReducerActivationThreshold = 1 * MB;
-  if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
+  if (memory_reducer() != nullptr && old_generation_capacity_after_bootstrap_ &&
+      ms_count_ == 0 &&
       OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
                                      kMemoryReducerActivationThreshold &&
       FLAG_memory_reducer_for_small_heaps) {