Commit 6df1aec6 authored by Ulan Degenbaev, committed by Commit Bot

[heap, infra] Remove --local-heaps and --concurrent-allocation flags

The flags are enabled by default and have stable coverage.
This also removes the corresponding bots.

Bug: v8:10315
Change-Id: Icce01383050dff758b6554db8e0c3589d6e5459c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2658324
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72457}
parent 5a2c53f9
@@ -294,7 +294,6 @@
         {'name': 'test262', 'shards': 7},
         {'name': 'v8testing', 'shards': 3},
         {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
-        {'name': 'v8testing', 'variant': 'no_local_heaps'},
         {'name': 'v8testing', 'variant': 'slow_path'},
       ],
     },
@@ -447,7 +446,6 @@
         {'name': 'test262', 'variant': 'default', 'shards': 3},
         {'name': 'v8testing', 'shards': 5},
         {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
-        {'name': 'v8testing', 'variant': 'no_local_heaps'},
         {'name': 'v8testing', 'variant': 'slow_path'},
         {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
       ],
@@ -1201,7 +1199,6 @@
         {'name': 'test262', 'shards': 5},
         {'name': 'v8testing', 'shards': 2},
         {'name': 'v8testing', 'variant': 'extra', 'shards': 2},
-        {'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
         {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
       ],
     },
@@ -1227,7 +1224,6 @@
         {'name': 'test262', 'variant': 'default', 'shards': 3},
         {'name': 'v8testing', 'shards': 5},
         {'name': 'v8testing', 'variant': 'extra', 'shards': 3},
-        {'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
         {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
         {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 1},
       ],
...
@@ -1036,19 +1036,8 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
             "use concurrent marking")
 DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
             "concurrently sweep array buffers")
-DEFINE_BOOL(concurrent_allocation, true, "concurrently allocate in old space")
 DEFINE_BOOL(stress_concurrent_allocation, false,
             "start background threads that allocate memory")
-DEFINE_BOOL(local_heaps, true, "allow heap access from background tasks")
-// Since the local_heaps flag is enabled by default, we defined reverse
-// implications to simplify disabling the flag.
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, turbo_direct_heap_access)
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, stress_concurrent_inlining)
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_inlining)
-DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_allocation)
-DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation,
-                           finalize_streaming_on_background)
-DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation, stress_concurrent_allocation)
 DEFINE_BOOL(parallel_marking, V8_CONCURRENT_MARKING_BOOL,
             "use parallel marking in atomic pause")
 DEFINE_INT(ephemeron_fixpoint_iterations, 10,
...
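For context on the implication macros deleted above: DEFINE_NEG_NEG_IMPLICATION(a, b) makes --no-a imply --no-b, which is why --no-local-heaps used to drag concurrent inlining and concurrent allocation down with it. A minimal standalone sketch of that rule, using a plain struct instead of V8's flag machinery (all names here are illustrative, not V8's actual macro expansion):

#include <cstdio>

// Illustrative stand-in for V8's generated flag storage.
struct Flags {
  bool local_heaps = true;            // was --local-heaps, default on
  bool concurrent_allocation = true;  // was --concurrent-allocation
  bool concurrent_inlining = true;
  bool stress_concurrent_allocation = false;
};

// DEFINE_NEG_NEG_IMPLICATION(a, b): if a is off, force b off as well.
void ApplyNegNegImplications(Flags& f) {
  if (!f.local_heaps) {
    f.concurrent_inlining = false;
    f.concurrent_allocation = false;
  }
  if (!f.concurrent_allocation) {
    f.stress_concurrent_allocation = false;
  }
}

int main() {
  Flags f;
  f.local_heaps = false;  // user passed --no-local-heaps
  ApplyNegNegImplications(f);
  std::printf("concurrent_allocation=%d\n", f.concurrent_allocation);  // 0
  return 0;
}

With the flags removed, no such transitive disabling is possible anymore; the dependent features are simply always on.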
@@ -46,24 +46,22 @@ bool HandleBase::IsDereferenceAllowed() const {
   if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
   if (!AllowHandleDereference::IsAllowed()) return false;
 
-  if (FLAG_local_heaps) {
-    LocalHeap* local_heap = isolate->CurrentLocalHeap();
-
-    // Local heap can't access handles when parked
-    if (!local_heap->IsHandleDereferenceAllowed()) {
-      StdoutStream{} << "Cannot dereference handle owned by "
-                     << "non-running local heap\n";
-      return false;
-    }
-
-    // We are pretty strict with handle dereferences on background threads: A
-    // background local heap is only allowed to dereference its own local or
-    // persistent handles.
-    if (!local_heap->is_main_thread()) {
-      // The current thread owns the handle and thus can dereference it.
-      return local_heap->ContainsPersistentHandle(location_) ||
-             local_heap->ContainsLocalHandle(location_);
-    }
+  LocalHeap* local_heap = isolate->CurrentLocalHeap();
+
+  // Local heap can't access handles when parked
+  if (!local_heap->IsHandleDereferenceAllowed()) {
+    StdoutStream{} << "Cannot dereference handle owned by "
+                   << "non-running local heap\n";
+    return false;
+  }
+
+  // We are pretty strict with handle dereferences on background threads: A
+  // background local heap is only allowed to dereference its own local or
+  // persistent handles.
+  if (!local_heap->is_main_thread()) {
+    // The current thread owns the handle and thus can dereference it.
+    return local_heap->ContainsPersistentHandle(location_) ||
+           local_heap->ContainsLocalHandle(location_);
   }
 
   // If LocalHeap::Current() is null, we're on the main thread -- if we were to
   // check main thread HandleScopes here, we should additionally check the
...
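The rule encoded above: a parked local heap may not touch the heap at all, and a background local heap may only dereference handles it owns, i.e. its own local or persistent handles. A simplified standalone sketch of that check, with stand-in types rather than V8's real classes:

#include <unordered_set>

// Illustrative stand-in for v8::internal::LocalHeap.
struct LocalHeapSketch {
  bool is_main_thread = false;
  bool parked = false;  // parked threads may not access the heap
  std::unordered_set<const void*> local_handles;
  std::unordered_set<const void*> persistent_handles;

  bool IsDereferenceAllowed(const void* location) const {
    // A parked (non-running) local heap cannot dereference anything.
    if (parked) return false;
    // The main thread is validated against its HandleScopes elsewhere.
    if (is_main_thread) return true;
    // Background threads may only touch handles they own.
    return local_handles.count(location) > 0 ||
           persistent_handles.count(location) > 0;
  }
};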
@@ -122,7 +122,7 @@ void PersistentHandlesList::Remove(PersistentHandles* persistent_handles) {
 }
 
 void PersistentHandlesList::Iterate(RootVisitor* visitor, Isolate* isolate) {
-  DCHECK_IMPLIES(FLAG_local_heaps, isolate->heap()->safepoint()->IsActive());
+  DCHECK(isolate->heap()->safepoint()->IsActive());
   base::MutexGuard guard(&persistent_handles_mutex_);
   for (PersistentHandles* current = persistent_handles_head_; current;
        current = current->next_) {
...
@@ -21,8 +21,6 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
                                                   AllocationAlignment alignment,
                                                   AllocationOrigin origin) {
   // TODO(dinfuehr): Add support for allocation observers
-  CHECK(FLAG_concurrent_allocation);
-
 #ifdef DEBUG
   local_heap_->VerifyCurrent();
 #endif
...
@@ -61,7 +61,6 @@ void StressConcurrentAllocatorTask::RunInternal() {
 
 // static
 void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
-  CHECK(FLAG_local_heaps && FLAG_concurrent_allocation);
   auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
   const double kDelayInSeconds = 0.1;
   V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
...
@@ -1989,7 +1989,6 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
 size_t Heap::PerformGarbageCollection(
     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   DisallowJavascriptExecution no_js(isolate());
-  base::Optional<SafepointScope> optional_safepoint_scope;
 
   if (IsYoungGenerationCollector(collector)) {
     CompleteSweepingYoung(collector);
@@ -2008,9 +2007,7 @@ size_t Heap::PerformGarbageCollection(
   TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
 
-  if (FLAG_local_heaps) {
-    optional_safepoint_scope.emplace(this);
-  }
+  SafepointScope safepoint_scope(this);
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -3289,7 +3286,6 @@ void Heap::MakeHeapIterable() {
 }
 
 void Heap::MakeLocalHeapLabsIterable() {
-  if (!FLAG_local_heaps) return;
   safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
     local_heap->MakeLinearAllocationAreaIterable();
   });
@@ -4498,10 +4494,8 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
   isolate_->handle_scope_implementer()->Iterate(v);
 #endif
 
-  if (FLAG_local_heaps) {
-    safepoint_->Iterate(&left_trim_visitor);
-    safepoint_->Iterate(v);
-  }
+  safepoint_->Iterate(&left_trim_visitor);
+  safepoint_->Iterate(v);
 
   isolate_->persistent_handles_list()->Iterate(&left_trim_visitor, isolate_);
   isolate_->persistent_handles_list()->Iterate(v, isolate_);
...
@@ -261,11 +261,9 @@ void IncrementalMarking::StartBlackAllocation() {
   heap()->old_space()->MarkLinearAllocationAreaBlack();
   heap()->map_space()->MarkLinearAllocationAreaBlack();
   heap()->code_space()->MarkLinearAllocationAreaBlack();
-  if (FLAG_local_heaps) {
-    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
-      local_heap->MarkLinearAllocationAreaBlack();
-    });
-  }
+  heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+    local_heap->MarkLinearAllocationAreaBlack();
+  });
   if (FLAG_trace_incremental_marking) {
     heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Black allocation started\n");
@@ -277,11 +275,8 @@ void IncrementalMarking::PauseBlackAllocation() {
   heap()->old_space()->UnmarkLinearAllocationArea();
   heap()->map_space()->UnmarkLinearAllocationArea();
   heap()->code_space()->UnmarkLinearAllocationArea();
-  if (FLAG_local_heaps) {
-    heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
-      local_heap->UnmarkLinearAllocationArea();
-    });
-  }
+  heap()->safepoint()->IterateLocalHeaps(
+      [](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
   if (FLAG_trace_incremental_marking) {
     heap()->isolate()->PrintWithTimestamp(
         "[IncrementalMarking] Black allocation paused\n");
@@ -600,19 +595,17 @@ void IncrementalMarking::Stop() {
   is_compacting_ = false;
   FinishBlackAllocation();
 
-  if (FLAG_local_heaps) {
-    // Merge live bytes counters of background threads
-    for (auto pair : background_live_bytes_) {
-      MemoryChunk* memory_chunk = pair.first;
-      intptr_t live_bytes = pair.second;
-
-      if (live_bytes) {
-        marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
-      }
+  // Merge live bytes counters of background threads
+  for (auto pair : background_live_bytes_) {
+    MemoryChunk* memory_chunk = pair.first;
+    intptr_t live_bytes = pair.second;
+
+    if (live_bytes) {
+      marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
     }
-    background_live_bytes_.clear();
   }
+
+  background_live_bytes_.clear();
 }
...
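The Stop() hunk above merges per-thread live-byte counts into the main marking state once background threads are stopped: each background thread buffers its deltas locally and publishes them in one batch instead of updating shared counters per object. A minimal standalone sketch of that merge pattern (simplified types, not V8's actual classes):

#include <cstdint>
#include <unordered_map>

struct MemoryChunk;  // stand-in for V8's per-page metadata

struct MarkingState {
  std::unordered_map<const MemoryChunk*, intptr_t> live_bytes;
  void IncrementLiveBytes(const MemoryChunk* chunk, intptr_t bytes) {
    live_bytes[chunk] += bytes;
  }
};

// Publish buffered per-thread deltas in one batch while the world is
// stopped, avoiding per-object atomic updates during marking.
void MergeBackgroundLiveBytes(
    std::unordered_map<const MemoryChunk*, intptr_t>& background_live_bytes,
    MarkingState& marking_state) {
  for (auto& pair : background_live_bytes) {
    if (pair.second) marking_state.IncrementLiveBytes(pair.first, pair.second);
  }
  background_live_bytes.clear();
}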
@@ -52,7 +52,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
       marking_barrier_(new MarkingBarrier(this)),
       old_space_allocator_(this, heap->old_space()) {
   heap_->safepoint()->AddLocalHeap(this, [this] {
-    if (FLAG_local_heaps && !is_main_thread()) {
+    if (!is_main_thread()) {
       WriteBarrier::SetForThread(marking_barrier_.get());
       if (heap_->incremental_marking()->IsMarking()) {
         marking_barrier_->Activate(
@@ -75,7 +75,7 @@ LocalHeap::~LocalHeap() {
   heap_->safepoint()->RemoveLocalHeap(this, [this] {
     old_space_allocator_.FreeLinearAllocationArea();
 
-    if (FLAG_local_heaps && !is_main_thread()) {
+    if (!is_main_thread()) {
       marking_barrier_->Publish();
       WriteBarrier::ClearForThread(marking_barrier_.get());
     }
...
@@ -858,11 +858,9 @@ void MarkCompactCollector::Prepare() {
     space->PrepareForMarkCompact();
   }
 
-  if (FLAG_local_heaps) {
-    // Fill and reset all background thread LABs
-    heap_->safepoint()->IterateLocalHeaps(
-        [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
-  }
+  // Fill and reset all background thread LABs
+  heap_->safepoint()->IterateLocalHeaps(
+      [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
 
   // All objects are guaranteed to be initialized in atomic pause
   heap()->new_lo_space()->ResetPendingObject();
...
@@ -94,36 +94,27 @@ void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
 // static
 void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
   heap->marking_barrier()->Activate(is_compacting);
-  if (FLAG_local_heaps) {
-    heap->safepoint()->IterateLocalHeaps(
-        [is_compacting](LocalHeap* local_heap) {
-          local_heap->marking_barrier()->Activate(is_compacting);
-        });
-  }
+  heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
+    local_heap->marking_barrier()->Activate(is_compacting);
+  });
 }
 
 // static
 void MarkingBarrier::DeactivateAll(Heap* heap) {
   heap->marking_barrier()->Deactivate();
-  if (FLAG_local_heaps) {
-    heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
-      local_heap->marking_barrier()->Deactivate();
-    });
-  }
+  heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+    local_heap->marking_barrier()->Deactivate();
+  });
 }
 
 // static
 void MarkingBarrier::PublishAll(Heap* heap) {
   heap->marking_barrier()->Publish();
-  if (FLAG_local_heaps) {
-    heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
-      local_heap->marking_barrier()->Publish();
-    });
-  }
+  heap->safepoint()->IterateLocalHeaps(
+      [](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
 }
 
 void MarkingBarrier::Publish() {
-  DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
   if (is_activated_) {
     worklist_.Publish();
     for (auto& it : typed_slots_map_) {
@@ -153,7 +144,6 @@ void MarkingBarrier::DeactivateSpace(NewSpace* space) {
 void MarkingBarrier::Deactivate() {
   is_activated_ = false;
   is_compacting_ = false;
-  DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
   if (is_main_thread_barrier_) {
     DeactivateSpace(heap_->old_space());
     DeactivateSpace(heap_->map_space());
@@ -191,7 +181,6 @@ void MarkingBarrier::ActivateSpace(NewSpace* space) {
 void MarkingBarrier::Activate(bool is_compacting) {
   DCHECK(!is_activated_);
   DCHECK(worklist_.IsLocalEmpty());
-  DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
   is_compacting_ = is_compacting;
   is_activated_ = true;
   if (is_main_thread_barrier_) {
...
@@ -429,7 +429,7 @@ void NewSpace::ResetParkedAllocationBuffers() {
 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
 void NewSpace::Grow() {
-  DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
+  DCHECK(heap()->safepoint()->IsActive());
   // Double the semispace size but only up to maximum capacity.
   DCHECK(TotalCapacity() < MaximumCapacity());
   size_t new_capacity = std::min(
...
@@ -326,9 +326,7 @@ class V8_EXPORT_PRIVATE PagedSpace
     base::Optional<base::MutexGuard> guard_;
   };
 
-  bool SupportsConcurrentAllocation() {
-    return FLAG_concurrent_allocation && !is_local_space();
-  }
+  bool SupportsConcurrentAllocation() { return !is_local_space(); }
 
   // Set space linear allocation area.
   void SetTopAndLimit(Address top, Address limit);
...
@@ -18,8 +18,6 @@ GlobalSafepoint::GlobalSafepoint(Heap* heap)
     : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
 
 void GlobalSafepoint::EnterSafepointScope() {
-  if (!FLAG_local_heaps) return;
-
   if (++active_safepoint_scopes_ > 1) return;
 
   TimedHistogramScope timer(
@@ -54,8 +52,6 @@ void GlobalSafepoint::EnterSafepointScope() {
 }
 
 void GlobalSafepoint::LeaveSafepointScope() {
-  if (!FLAG_local_heaps) return;
-
   DCHECK_GT(active_safepoint_scopes_, 0);
   if (--active_safepoint_scopes_ > 0) return;
...
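As the counter logic above shows, safepoint scopes nest: only the outermost EnterSafepointScope stops background threads, and only the matching outermost LeaveSafepointScope resumes them. With the early returns on --no-local-heaps gone, the safepoint is now always taken. A stripped-down sketch of the counting pattern (the thread-stopping calls are stubs, not V8's implementation):

#include <cassert>

class SafepointSketch {
 public:
  void EnterSafepointScope() {
    // Nested scopes are free: threads are already stopped.
    if (++active_safepoint_scopes_ > 1) return;
    StopAllBackgroundThreads();
  }

  void LeaveSafepointScope() {
    assert(active_safepoint_scopes_ > 0);
    if (--active_safepoint_scopes_ > 0) return;
    ResumeAllBackgroundThreads();
  }

 private:
  void StopAllBackgroundThreads() { /* park or interrupt worker threads */ }
  void ResumeAllBackgroundThreads() { /* wake worker threads */ }
  int active_safepoint_scopes_ = 0;
};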
@@ -681,14 +681,14 @@ size_t StringTable::GetCurrentMemoryUsage() const {
 void StringTable::IterateElements(RootVisitor* visitor) {
   // This should only happen during garbage collection when background threads
   // are paused, so the load can be relaxed.
-  DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+  DCHECK(isolate_->heap()->safepoint()->IsActive());
   data_.load(std::memory_order_relaxed)->IterateElements(visitor);
 }
 
 void StringTable::DropOldData() {
   // This should only happen during garbage collection when background threads
   // are paused, so the load can be relaxed.
-  DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+  DCHECK(isolate_->heap()->safepoint()->IsActive());
   DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
   data_.load(std::memory_order_relaxed)->DropPreviousData();
 }
@@ -696,7 +696,7 @@ void StringTable::DropOldData() {
 void StringTable::NotifyElementsRemoved(int count) {
   // This should only happen during garbage collection when background threads
   // are paused, so the load can be relaxed.
-  DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
+  DCHECK(isolate_->heap()->safepoint()->IsActive());
   DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
   data_.load(std::memory_order_relaxed)->ElementsRemoved(count);
 }
...
@@ -22,10 +22,6 @@ SerializerTester::SerializerTester(const char* source)
     : canonical_(main_isolate()) {
   // The tests only make sense in the context of concurrent compilation.
   FLAG_concurrent_inlining = true;
-  // --local-heaps is enabled by default, but some bots disable it.
-  // Ensure that it is enabled here because we have reverse implication
-  // from --no-local-heaps to --no-concurrent-inlining.
-  if (!FLAG_local_heaps) FLAG_local_heaps = true;
   // The tests don't make sense when optimizations are turned off.
   FLAG_opt = true;
   // We need the IC to feed it to the serializer.
...
@@ -243,12 +243,6 @@ bool InCorrectGeneration(HeapObject object) {
                  : i::Heap::InYoungGeneration(object);
 }
 
-void EnsureFlagLocalHeapsEnabled() {
-  // Avoid data race in concurrent thread by only setting the flag to true if
-  // not already enabled.
-  if (!FLAG_local_heaps) FLAG_local_heaps = true;
-}
-
 void GrowNewSpace(Heap* heap) {
   SafepointScope scope(heap);
   if (!heap->new_space()->IsAtMaximumCapacity()) {
...
@@ -70,8 +70,6 @@ void InvokeScavenge(Isolate* isolate = nullptr);
 void InvokeMarkSweep(Isolate* isolate = nullptr);
 
-void EnsureFlagLocalHeapsEnabled();
-
 void GrowNewSpace(Heap* heap);
 
 void GrowNewSpaceToMaximumCapacity(Heap* heap);
...
@@ -83,8 +83,6 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
 UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
   FLAG_max_old_space_size = 32;
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
   FLAG_stress_concurrent_allocation = false;
 
   v8::Isolate::CreateParams create_params;
@@ -118,8 +116,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
 UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
   FLAG_max_old_space_size = 4;
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
   FLAG_stress_concurrent_allocation = false;
 
   v8::Isolate::CreateParams create_params;
@@ -167,8 +163,6 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
 UNINITIALIZED_TEST(ConcurrentAllocationInLargeSpace) {
   FLAG_max_old_space_size = 32;
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
   FLAG_stress_concurrent_allocation = false;
 
   v8::Isolate::CreateParams create_params;
@@ -243,9 +237,6 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
 };
 
 UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
-
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
   v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -310,8 +301,6 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
     return;
   }
   ManualGCScope manual_gc_scope;
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
 
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -377,8 +366,6 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
   }
   FLAG_manual_evacuation_candidates_selection = true;
   ManualGCScope manual_gc_scope;
-  FLAG_concurrent_allocation = true;
-  FLAG_local_heaps = true;
 
   v8::Isolate::CreateParams create_params;
   create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
...
@@ -7116,7 +7116,6 @@ TEST(Regress978156) {
 }
 
 TEST(GarbageCollectionWithLocalHeap) {
-  EnsureFlagLocalHeapsEnabled();
   ManualGCScope manual_gc_scope;
   CcTest::InitializeVM();
...
@@ -70,7 +70,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
 // Uses linear search on a flat object, with up to 8 elements.
 TEST(LinearSearchFlatObject) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -122,7 +121,6 @@ TEST(LinearSearchFlatObject) {
 // Uses linear search on a flat object, which has more than 8 elements.
 TEST(LinearSearchFlatObject_ManyElements) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
...
@@ -156,7 +156,6 @@ static void CheckedWait(base::Semaphore& semaphore) {
 // Verify that a LoadIC can be cycled through different states and safely
 // read on a background thread.
 TEST(CheckLoadICStates) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   FLAG_lazy_feedback_allocation = false;
   Isolate* isolate = CcTest::i_isolate();
...
@@ -68,7 +68,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
 // Test to search on a background thread, while the main thread is idle.
 TEST(ProtoWalkBackground) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -108,7 +107,6 @@ TEST(ProtoWalkBackground) {
 // Test to search on a background thread, while the main thread modifies the
 // descriptor array.
 TEST(ProtoWalkBackground_DescriptorArrayWrite) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -155,7 +153,6 @@ TEST(ProtoWalkBackground_DescriptorArrayWrite) {
 }
 
 TEST(ProtoWalkBackground_PrototypeChainWrite) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
...
@@ -100,7 +100,6 @@ class ConcurrentStringThread final : public v8::base::Thread {
 // Inspect a one byte string, while the main thread externalizes it.
 TEST(InspectOneByteExternalizing) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -146,7 +145,6 @@ TEST(InspectOneByteExternalizing) {
 // Inspect a one byte string, while the main thread externalizes it into a two
 // bytes string.
 TEST(InspectOneIntoTwoByteExternalizing) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -189,7 +187,6 @@ TEST(InspectOneIntoTwoByteExternalizing) {
 // Inspect a two byte string, while the main thread externalizes it.
 TEST(InspectTwoByteExternalizing) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
...
@@ -72,7 +72,6 @@ class LocalHandlesThread final : public v8::base::Thread {
 };
 
 TEST(CreateLocalHandles) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -100,7 +99,6 @@ TEST(CreateLocalHandles) {
 }
 
 TEST(CreateLocalHandlesWithoutLocalHandleScope) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handle_scope(isolate);
@@ -109,7 +107,6 @@ TEST(CreateLocalHandlesWithoutLocalHandleScope) {
 }
 
 TEST(DereferenceLocalHandle) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -133,7 +130,6 @@ TEST(DereferenceLocalHandle) {
 }
 
 TEST(DereferenceLocalHandleFailsWhenDisallowed) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
...
@@ -83,7 +83,6 @@ class PersistentHandlesThread final : public v8::base::Thread {
 };
 
 TEST(CreatePersistentHandles) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -119,7 +118,6 @@ TEST(CreatePersistentHandles) {
 }
 
 TEST(DereferencePersistentHandle) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -139,7 +137,6 @@ TEST(DereferencePersistentHandle) {
 }
 
 TEST(DereferencePersistentHandleFailsWhenDisallowed) {
-  heap::EnsureFlagLocalHeapsEnabled();
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
...
@@ -63,7 +63,6 @@ class LocalFactoryTest : public TestWithIsolateAndZone {
                     REPLMode::kNo),
                 &state_),
         local_isolate_(isolate()->main_thread_local_isolate()) {
-    FLAG_concurrent_allocation = true;
   }
 
   FunctionLiteral* ParseProgram(const char* source) {
...
@@ -15,16 +15,9 @@
 namespace v8 {
 namespace internal {
 
-void EnsureFlagLocalHeapsEnabled() {
-  // Avoid data race in concurrent thread by only setting the flag to true if
-  // not already enabled.
-  if (!FLAG_local_heaps) FLAG_local_heaps = true;
-}
-
 using SafepointTest = TestWithIsolate;
 
 TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
-  EnsureFlagLocalHeapsEnabled();
   Heap* heap = i_isolate()->heap();
   bool run = false;
   {
@@ -54,7 +47,6 @@ class ParkedThread final : public v8::base::Thread {
 };
 
 TEST_F(SafepointTest, StopParkedThreads) {
-  EnsureFlagLocalHeapsEnabled();
   Heap* heap = i_isolate()->heap();
   int safepoints = 0;
@@ -114,7 +106,6 @@ class RunningThread final : public v8::base::Thread {
 };
 
 TEST_F(SafepointTest, StopRunningThreads) {
-  EnsureFlagLocalHeapsEnabled();
   Heap* heap = i_isolate()->heap();
   const int kThreads = 10;
...
@@ -16,10 +16,6 @@ ALL_VARIANT_FLAGS = {
   "jitless": [["--jitless"]],
   "minor_mc": [["--minor-mc"]],
   "no_lfa": [["--no-lazy-feedback-allocation"]],
-  "no_local_heaps": [[
-    "--no-local-heaps",
-    "--no-turbo-direct-heap-access",
-    "--no-finalize-streaming-on-background"]],
   # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
   # would not force optimization too. It turns into a Nop. Please see
   # https://chromium-review.googlesource.com/c/452620/ for more discussion.
...