Commit 6df1aec6 authored by Ulan Degenbaev, committed by Commit Bot

[heap, infra] Remove --local-heaps and --concurrent-allocation flags

Both flags are enabled by default and have stable test coverage.
This change also removes the corresponding bots and the no_local_heaps
test variant.

Bug: v8:10315
Change-Id: Icce01383050dff758b6554db8e0c3589d6e5459c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2658324
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72457}
parent 5a2c53f9
@@ -294,7 +294,6 @@
{'name': 'test262', 'shards': 7},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'variant': 'no_local_heaps'},
{'name': 'v8testing', 'variant': 'slow_path'},
],
},
@@ -447,7 +446,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'variant': 'no_local_heaps'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
],
@@ -1201,7 +1199,6 @@
{'name': 'test262', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
],
},
@@ -1227,7 +1224,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 3},
{'name': 'v8testing', 'shards': 5},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
{'name': 'v8testing', 'variant': 'no_local_heaps', 'shards': 1},
{'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 1},
],
......
@@ -1036,19 +1036,8 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
"use concurrent marking")
DEFINE_BOOL(concurrent_array_buffer_sweeping, true,
"concurrently sweep array buffers")
DEFINE_BOOL(concurrent_allocation, true, "concurrently allocate in old space")
DEFINE_BOOL(stress_concurrent_allocation, false,
"start background threads that allocate memory")
DEFINE_BOOL(local_heaps, true, "allow heap access from background tasks")
// Since the local_heaps flag is enabled by default, we defined reverse
// implications to simplify disabling the flag.
DEFINE_NEG_NEG_IMPLICATION(local_heaps, turbo_direct_heap_access)
DEFINE_NEG_NEG_IMPLICATION(local_heaps, stress_concurrent_inlining)
DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_inlining)
DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_allocation)
DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation,
finalize_streaming_on_background)
DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation, stress_concurrent_allocation)
DEFINE_BOOL(parallel_marking, V8_CONCURRENT_MARKING_BOOL,
"use parallel marking in atomic pause")
DEFINE_INT(ephemeron_fixpoint_iterations, 10,
......
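For context on the implication macros deleted above: a reverse ("NEG_NEG") implication makes --no-<premise> imply --no-<conclusion>, so while --local-heaps still existed, disabling it also disabled concurrent allocation, concurrent inlining, and direct heap access. Below is a minimal standalone sketch of that behavior, with illustrative names rather than V8's actual macro machinery:

```cpp
#include <iostream>

// Illustrative flag set; in V8 these were DEFINE_BOOL flags, on by default.
struct Flags {
  bool local_heaps = true;
  bool concurrent_allocation = true;
  bool stress_concurrent_allocation = false;
};

// Models DEFINE_NEG_NEG_IMPLICATION(local_heaps, concurrent_allocation) and
// DEFINE_NEG_NEG_IMPLICATION(concurrent_allocation,
// stress_concurrent_allocation): turning the premise off turns the
// conclusion off, leaving the defaults untouched otherwise.
void ApplyReverseImplications(Flags& f) {
  if (!f.local_heaps) f.concurrent_allocation = false;
  if (!f.concurrent_allocation) f.stress_concurrent_allocation = false;
}

int main() {
  Flags f;
  f.local_heaps = false;  // simulate passing --no-local-heaps
  ApplyReverseImplications(f);
  std::cout << f.concurrent_allocation << "\n";  // prints 0: implied off
}
```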
@@ -46,24 +46,22 @@ bool HandleBase::IsDereferenceAllowed() const {
if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
if (!AllowHandleDereference::IsAllowed()) return false;
if (FLAG_local_heaps) {
LocalHeap* local_heap = isolate->CurrentLocalHeap();
// Local heap can't access handles when parked
if (!local_heap->IsHandleDereferenceAllowed()) {
StdoutStream{} << "Cannot dereference handle owned by "
<< "non-running local heap\n";
return false;
}
LocalHeap* local_heap = isolate->CurrentLocalHeap();
// We are pretty strict with handle dereferences on background threads: A
// background local heap is only allowed to dereference its own local or
// persistent handles.
if (!local_heap->is_main_thread()) {
// The current thread owns the handle and thus can dereference it.
return local_heap->ContainsPersistentHandle(location_) ||
local_heap->ContainsLocalHandle(location_);
}
// Local heap can't access handles when parked
if (!local_heap->IsHandleDereferenceAllowed()) {
StdoutStream{} << "Cannot dereference handle owned by "
<< "non-running local heap\n";
return false;
}
// We are pretty strict with handle dereferences on background threads: A
// background local heap is only allowed to dereference its own local or
// persistent handles.
if (!local_heap->is_main_thread()) {
// The current thread owns the handle and thus can dereference it.
return local_heap->ContainsPersistentHandle(location_) ||
local_heap->ContainsLocalHandle(location_);
}
// If LocalHeap::Current() is null, we're on the main thread -- if we were to
// check main thread HandleScopes here, we should additionally check the
......
@@ -122,7 +122,7 @@ void PersistentHandlesList::Remove(PersistentHandles* persistent_handles) {
}
void PersistentHandlesList::Iterate(RootVisitor* visitor, Isolate* isolate) {
DCHECK_IMPLIES(FLAG_local_heaps, isolate->heap()->safepoint()->IsActive());
DCHECK(isolate->heap()->safepoint()->IsActive());
base::MutexGuard guard(&persistent_handles_mutex_);
for (PersistentHandles* current = persistent_handles_head_; current;
current = current->next_) {
......
@@ -21,8 +21,6 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
// TODO(dinfuehr): Add support for allocation observers
CHECK(FLAG_concurrent_allocation);
#ifdef DEBUG
local_heap_->VerifyCurrent();
#endif
......
@@ -61,7 +61,6 @@ void StressConcurrentAllocatorTask::RunInternal() {
// static
void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
CHECK(FLAG_local_heaps && FLAG_concurrent_allocation);
auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
const double kDelayInSeconds = 0.1;
V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
......
@@ -1989,7 +1989,6 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
base::Optional<SafepointScope> optional_safepoint_scope;
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
@@ -2008,9 +2007,7 @@ size_t Heap::PerformGarbageCollection(
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
}
SafepointScope safepoint_scope(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -3289,7 +3286,6 @@ void Heap::MakeHeapIterable() {
}
void Heap::MakeLocalHeapLabsIterable() {
if (!FLAG_local_heaps) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
});
@@ -4498,10 +4494,8 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
isolate_->handle_scope_implementer()->Iterate(v);
#endif
if (FLAG_local_heaps) {
safepoint_->Iterate(&left_trim_visitor);
safepoint_->Iterate(v);
}
safepoint_->Iterate(&left_trim_visitor);
safepoint_->Iterate(v);
isolate_->persistent_handles_list()->Iterate(&left_trim_visitor, isolate_);
isolate_->persistent_handles_list()->Iterate(v, isolate_);
......
@@ -261,11 +261,9 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->old_space()->MarkLinearAllocationAreaBlack();
heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
if (FLAG_local_heaps) {
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
});
}
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
});
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -277,11 +275,8 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
if (FLAG_local_heaps) {
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->UnmarkLinearAllocationArea();
});
}
heap()->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -600,19 +595,17 @@ void IncrementalMarking::Stop() {
is_compacting_ = false;
FinishBlackAllocation();
if (FLAG_local_heaps) {
// Merge live bytes counters of background threads
for (auto pair : background_live_bytes_) {
MemoryChunk* memory_chunk = pair.first;
intptr_t live_bytes = pair.second;
// Merge live bytes counters of background threads
for (auto pair : background_live_bytes_) {
MemoryChunk* memory_chunk = pair.first;
intptr_t live_bytes = pair.second;
if (live_bytes) {
marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
}
if (live_bytes) {
marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
}
background_live_bytes_.clear();
}
background_live_bytes_.clear();
}
......
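The Stop() hunk above keeps the pattern of buffering live-byte deltas per background thread and folding them into the canonical marking state on the main thread; only the FLAG_local_heaps guard goes away. A simplified sketch of that merge-on-stop pattern, using illustrative types rather than V8's:

```cpp
#include <cstdint>
#include <unordered_map>

struct MemoryChunk {
  intptr_t live_bytes = 0;  // canonical counter, owned by the main thread
};

// Background threads accumulate deltas here instead of contending on the
// chunks' counters; the main thread merges once marking stops.
using LiveBytesMap = std::unordered_map<MemoryChunk*, intptr_t>;

void MergeBackgroundLiveBytes(LiveBytesMap& background_live_bytes) {
  for (auto& pair : background_live_bytes) {
    if (pair.second != 0) pair.first->live_bytes += pair.second;
  }
  background_live_bytes.clear();  // start fresh for the next marking cycle
}
```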
@@ -52,7 +52,7 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
marking_barrier_(new MarkingBarrier(this)),
old_space_allocator_(this, heap->old_space()) {
heap_->safepoint()->AddLocalHeap(this, [this] {
if (FLAG_local_heaps && !is_main_thread()) {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
if (heap_->incremental_marking()->IsMarking()) {
marking_barrier_->Activate(
@@ -75,7 +75,7 @@ LocalHeap::~LocalHeap() {
heap_->safepoint()->RemoveLocalHeap(this, [this] {
old_space_allocator_.FreeLinearAllocationArea();
if (FLAG_local_heaps && !is_main_thread()) {
if (!is_main_thread()) {
marking_barrier_->Publish();
WriteBarrier::ClearForThread(marking_barrier_.get());
}
......
@@ -858,11 +858,9 @@ void MarkCompactCollector::Prepare() {
space->PrepareForMarkCompact();
}
if (FLAG_local_heaps) {
// Fill and reset all background thread LABs
heap_->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
// Fill and reset all background thread LABs
heap_->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
// All objects are guaranteed to be initialized in atomic pause
heap()->new_lo_space()->ResetPendingObject();
......
@@ -94,36 +94,27 @@ void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
// static
void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
heap->marking_barrier()->Activate(is_compacting);
if (FLAG_local_heaps) {
heap->safepoint()->IterateLocalHeaps(
[is_compacting](LocalHeap* local_heap) {
local_heap->marking_barrier()->Activate(is_compacting);
});
}
heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
local_heap->marking_barrier()->Activate(is_compacting);
});
}
// static
void MarkingBarrier::DeactivateAll(Heap* heap) {
heap->marking_barrier()->Deactivate();
if (FLAG_local_heaps) {
heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->marking_barrier()->Deactivate();
});
}
heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->marking_barrier()->Deactivate();
});
}
// static
void MarkingBarrier::PublishAll(Heap* heap) {
heap->marking_barrier()->Publish();
if (FLAG_local_heaps) {
heap->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->marking_barrier()->Publish();
});
}
heap->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->marking_barrier()->Publish(); });
}
void MarkingBarrier::Publish() {
DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
if (is_activated_) {
worklist_.Publish();
for (auto& it : typed_slots_map_) {
@@ -153,7 +144,6 @@ void MarkingBarrier::DeactivateSpace(NewSpace* space) {
void MarkingBarrier::Deactivate() {
is_activated_ = false;
is_compacting_ = false;
DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
if (is_main_thread_barrier_) {
DeactivateSpace(heap_->old_space());
DeactivateSpace(heap_->map_space());
@@ -191,7 +181,6 @@ void MarkingBarrier::ActivateSpace(NewSpace* space) {
void MarkingBarrier::Activate(bool is_compacting) {
DCHECK(!is_activated_);
DCHECK(worklist_.IsLocalEmpty());
DCHECK_IMPLIES(!is_main_thread_barrier_, FLAG_local_heaps);
is_compacting_ = is_compacting;
is_activated_ = true;
if (is_main_thread_barrier_) {
......
@@ -429,7 +429,7 @@ void NewSpace::ResetParkedAllocationBuffers() {
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
DCHECK_IMPLIES(FLAG_local_heaps, heap()->safepoint()->IsActive());
DCHECK(heap()->safepoint()->IsActive());
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
size_t new_capacity = std::min(
......
@@ -326,9 +326,7 @@ class V8_EXPORT_PRIVATE PagedSpace
base::Optional<base::MutexGuard> guard_;
};
bool SupportsConcurrentAllocation() {
return FLAG_concurrent_allocation && !is_local_space();
}
bool SupportsConcurrentAllocation() { return !is_local_space(); }
// Set space linear allocation area.
void SetTopAndLimit(Address top, Address limit);
......
@@ -18,8 +18,6 @@ GlobalSafepoint::GlobalSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
void GlobalSafepoint::EnterSafepointScope() {
if (!FLAG_local_heaps) return;
if (++active_safepoint_scopes_ > 1) return;
TimedHistogramScope timer(
@@ -54,8 +52,6 @@ void GlobalSafepoint::EnterSafepointScope() {
}
void GlobalSafepoint::LeaveSafepointScope() {
if (!FLAG_local_heaps) return;
DCHECK_GT(active_safepoint_scopes_, 0);
if (--active_safepoint_scopes_ > 0) return;
......
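The safepoint hunks above drop the early returns for --no-local-heaps but keep the reentrant scope counting: only the outermost Enter/Leave pair actually stops and resumes background threads. A minimal sketch of that counting pattern, simplified and single-threaded (V8's version additionally synchronizes with the local heaps):

```cpp
#include <cassert>

class SafepointScopes {
 public:
  void Enter() {
    if (++active_scopes_ > 1) return;  // nested scope: threads already stopped
    // ... outermost scope: park/stop background threads here ...
  }
  void Leave() {
    assert(active_scopes_ > 0);
    if (--active_scopes_ > 0) return;  // an outer scope is still active
    // ... outermost scope ended: resume background threads here ...
  }

 private:
  int active_scopes_ = 0;
};
```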
@@ -681,14 +681,14 @@ size_t StringTable::GetCurrentMemoryUsage() const {
void StringTable::IterateElements(RootVisitor* visitor) {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
DCHECK(isolate_->heap()->safepoint()->IsActive());
data_.load(std::memory_order_relaxed)->IterateElements(visitor);
}
void StringTable::DropOldData() {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
DCHECK(isolate_->heap()->safepoint()->IsActive());
DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
data_.load(std::memory_order_relaxed)->DropPreviousData();
}
@@ -696,7 +696,7 @@ void StringTable::DropOldData() {
void StringTable::NotifyElementsRemoved(int count) {
// This should only happen during garbage collection when background threads
// are paused, so the load can be relaxed.
DCHECK_IMPLIES(FLAG_local_heaps, isolate_->heap()->safepoint()->IsActive());
DCHECK(isolate_->heap()->safepoint()->IsActive());
DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
data_.load(std::memory_order_relaxed)->ElementsRemoved(count);
}
......
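The relaxed loads kept in the string-table hunks rest on an explicit argument: every caller runs while background threads are stopped at a safepoint, so the std::memory_order_relaxed load of data_ cannot race with a concurrent update. A small sketch of the idea, with illustrative names (the real synchronization comes from the safepoint machinery):

```cpp
#include <atomic>

struct StringTableData {
  int element_count = 0;
};

std::atomic<StringTableData*> data_{new StringTableData()};

// Safe with relaxed ordering only because callers guarantee external
// synchronization: all potential writers are paused at a safepoint, and
// stopping/resuming them already establishes the needed happens-before.
StringTableData* LoadDuringSafepoint() {
  return data_.load(std::memory_order_relaxed);
}
```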
@@ -22,10 +22,6 @@ SerializerTester::SerializerTester(const char* source)
: canonical_(main_isolate()) {
// The tests only make sense in the context of concurrent compilation.
FLAG_concurrent_inlining = true;
// --local-heaps is enabled by default, but some bots disable it.
// Ensure that it is enabled here because we have reverse implication
// from --no-local-heaps to --no-concurrent-inlining.
if (!FLAG_local_heaps) FLAG_local_heaps = true;
// The tests don't make sense when optimizations are turned off.
FLAG_opt = true;
// We need the IC to feed it to the serializer.
......
@@ -243,12 +243,6 @@ bool InCorrectGeneration(HeapObject object) {
: i::Heap::InYoungGeneration(object);
}
void EnsureFlagLocalHeapsEnabled() {
// Avoid data race in concurrent thread by only setting the flag to true if
// not already enabled.
if (!FLAG_local_heaps) FLAG_local_heaps = true;
}
void GrowNewSpace(Heap* heap) {
SafepointScope scope(heap);
if (!heap->new_space()->IsAtMaximumCapacity()) {
......
@@ -70,8 +70,6 @@ void InvokeScavenge(Isolate* isolate = nullptr);
void InvokeMarkSweep(Isolate* isolate = nullptr);
void EnsureFlagLocalHeapsEnabled();
void GrowNewSpace(Heap* heap);
void GrowNewSpaceToMaximumCapacity(Heap* heap);
......
@@ -83,8 +83,6 @@ class ConcurrentAllocationThread final : public v8::base::Thread {
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
FLAG_max_old_space_size = 32;
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -118,8 +116,6 @@ UNINITIALIZED_TEST(ConcurrentAllocationInOldSpace) {
UNINITIALIZED_TEST(ConcurrentAllocationInOldSpaceFromMainThread) {
FLAG_max_old_space_size = 4;
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -167,8 +163,6 @@ class LargeObjectConcurrentAllocationThread final : public v8::base::Thread {
UNINITIALIZED_TEST(ConcurrentAllocationInLargeSpace) {
FLAG_max_old_space_size = 32;
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
FLAG_stress_concurrent_allocation = false;
v8::Isolate::CreateParams create_params;
@@ -243,9 +237,6 @@ class ConcurrentBlackAllocationThread final : public v8::base::Thread {
};
UNINITIALIZED_TEST(ConcurrentBlackAllocation) {
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -310,8 +301,6 @@ UNINITIALIZED_TEST(ConcurrentWriteBarrier) {
return;
}
ManualGCScope manual_gc_scope;
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -377,8 +366,6 @@ UNINITIALIZED_TEST(ConcurrentRecordRelocSlot) {
}
FLAG_manual_evacuation_candidates_selection = true;
ManualGCScope manual_gc_scope;
FLAG_concurrent_allocation = true;
FLAG_local_heaps = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
......
@@ -7116,7 +7116,6 @@ TEST(Regress978156) {
}
TEST(GarbageCollectionWithLocalHeap) {
EnsureFlagLocalHeapsEnabled();
ManualGCScope manual_gc_scope;
CcTest::InitializeVM();
......
@@ -70,7 +70,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
// Uses linear search on a flat object, with up to 8 elements.
TEST(LinearSearchFlatObject) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -122,7 +121,6 @@ TEST(LinearSearchFlatObject) {
// Uses linear search on a flat object, which has more than 8 elements.
TEST(LinearSearchFlatObject_ManyElements) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
@@ -156,7 +156,6 @@ static void CheckedWait(base::Semaphore& semaphore) {
// Verify that a LoadIC can be cycled through different states and safely
// read on a background thread.
TEST(CheckLoadICStates) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
FLAG_lazy_feedback_allocation = false;
Isolate* isolate = CcTest::i_isolate();
......
@@ -68,7 +68,6 @@ class ConcurrentSearchThread final : public v8::base::Thread {
// Test to search on a background thread, while the main thread is idle.
TEST(ProtoWalkBackground) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -108,7 +107,6 @@ TEST(ProtoWalkBackground) {
// Test to search on a background thread, while the main thread modifies the
// descriptor array.
TEST(ProtoWalkBackground_DescriptorArrayWrite) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -155,7 +153,6 @@ TEST(ProtoWalkBackground_DescriptorArrayWrite) {
}
TEST(ProtoWalkBackground_PrototypeChainWrite) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
@@ -100,7 +100,6 @@ class ConcurrentStringThread final : public v8::base::Thread {
// Inspect a one byte string, while the main thread externalizes it.
TEST(InspectOneByteExternalizing) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -146,7 +145,6 @@ TEST(InspectOneByteExternalizing) {
// Inspect a one byte string, while the main thread externalizes it into a two
// bytes string.
TEST(InspectOneIntoTwoByteExternalizing) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -189,7 +187,6 @@ TEST(InspectOneIntoTwoByteExternalizing) {
// Inspect a two byte string, while the main thread externalizes it.
TEST(InspectTwoByteExternalizing) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
@@ -72,7 +72,6 @@ class LocalHandlesThread final : public v8::base::Thread {
};
TEST(CreateLocalHandles) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -100,7 +99,6 @@ TEST(CreateLocalHandles) {
}
TEST(CreateLocalHandlesWithoutLocalHandleScope) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope handle_scope(isolate);
@@ -109,7 +107,6 @@ TEST(CreateLocalHandlesWithoutLocalHandleScope) {
}
TEST(DereferenceLocalHandle) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -133,7 +130,6 @@ TEST(DereferenceLocalHandle) {
}
TEST(DereferenceLocalHandleFailsWhenDisallowed) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
@@ -83,7 +83,6 @@ class PersistentHandlesThread final : public v8::base::Thread {
};
TEST(CreatePersistentHandles) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -119,7 +118,6 @@ TEST(CreatePersistentHandles) {
}
TEST(DereferencePersistentHandle) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -139,7 +137,6 @@ TEST(DereferencePersistentHandle) {
}
TEST(DereferencePersistentHandleFailsWhenDisallowed) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
@@ -63,7 +63,6 @@ class LocalFactoryTest : public TestWithIsolateAndZone {
REPLMode::kNo),
&state_),
local_isolate_(isolate()->main_thread_local_isolate()) {
FLAG_concurrent_allocation = true;
}
FunctionLiteral* ParseProgram(const char* source) {
......
@@ -15,16 +15,9 @@
namespace v8 {
namespace internal {
void EnsureFlagLocalHeapsEnabled() {
// Avoid data race in concurrent thread by only setting the flag to true if
// not already enabled.
if (!FLAG_local_heaps) FLAG_local_heaps = true;
}
using SafepointTest = TestWithIsolate;
TEST_F(SafepointTest, ReachSafepointWithoutLocalHeaps) {
EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
bool run = false;
{
@@ -54,7 +47,6 @@ class ParkedThread final : public v8::base::Thread {
};
TEST_F(SafepointTest, StopParkedThreads) {
EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
int safepoints = 0;
@@ -114,7 +106,6 @@ class RunningThread final : public v8::base::Thread {
};
TEST_F(SafepointTest, StopRunningThreads) {
EnsureFlagLocalHeapsEnabled();
Heap* heap = i_isolate()->heap();
const int kThreads = 10;
......
@@ -16,10 +16,6 @@ ALL_VARIANT_FLAGS = {
"jitless": [["--jitless"]],
"minor_mc": [["--minor-mc"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
"no_local_heaps": [[
"--no-local-heaps",
"--no-turbo-direct-heap-access",
"--no-finalize-streaming-on-background"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
# https://chromium-review.googlesource.com/c/452620/ for more discussion.
......