Commit ef99ff6e authored by Hannes Payer, committed by Commit Bot

[heap] Don't spawn new GC tasks when the heap is in TEAR_DOWN state.

Change-Id: I57da95525e09820ed1a1697cc4eb1e39ecb7c7cc
Reviewed-on: https://chromium-review.googlesource.com/964282
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51961}
parent f1b1ec70
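
Illustrative sketch (not part of this CL): the change keys task scheduling off the heap's GC state rather than the removed use_tasks_ flag, so every site that posts a background GC task first asks Heap::IsTearingDown(). The simplified Heap class and MaybeScheduleGCTask() below are hypothetical stand-ins; only SetGCState(), IsTearingDown(), TEAR_DOWN, and the gc_state_ field come from the diff.

// Simplified, hypothetical stand-in for v8::internal::Heap; real state names
// and teardown work are elided.
class Heap {
 public:
  enum HeapState { NOT_IN_GC, TEAR_DOWN };  // the real enum has more GC states

  void SetGCState(HeapState state) { gc_state_ = state; }
  bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }

  void TearDown() {
    SetGCState(TEAR_DOWN);
    // ... free spaces, join background threads, etc. ...
  }

 private:
  HeapState gc_state_ = NOT_IN_GC;
};

// Hypothetical task-posting site: once TearDown() has set TEAR_DOWN, the
// early return keeps any new GC task from being spawned.
void MaybeScheduleGCTask(Heap* heap) {
  if (heap->IsTearingDown()) return;
  // ... create a CancelableTask and post it to a worker thread ...
}

int main() {
  Heap heap;
  MaybeScheduleGCTask(&heap);  // heap is live: a task could be posted here
  heap.TearDown();
  MaybeScheduleGCTask(&heap);  // no-op: TEAR_DOWN blocks new tasks
  return 0;
}

Compared with the old use_tasks_ flag, the decision now derives from gc_state_, which Heap::TearDown() already sets, so Isolate::Deinit() no longer needs the separate stop_using_tasks() call.
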
@@ -47,7 +47,7 @@ class ArrayBufferCollector::FreeingTask final : public CancelableTask {
 void ArrayBufferCollector::FreeAllocationsOnBackgroundThread() {
   heap_->account_external_memory_concurrently_freed();
-  if (heap_->use_tasks() && FLAG_concurrent_array_buffer_freeing) {
+  if (!heap_->IsTearingDown() && FLAG_concurrent_array_buffer_freeing) {
     FreeingTask* task = new FreeingTask(heap_);
     V8::GetCurrentPlatform()->CallOnWorkerThread(task);
   } else {
......
@@ -551,7 +551,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
 }

 void ConcurrentMarking::ScheduleTasks() {
-  DCHECK(heap_->use_tasks());
+  DCHECK(!heap_->IsTearingDown());
   if (!FLAG_concurrent_marking) return;
   base::LockGuard<base::Mutex> guard(&pending_lock_);
   DCHECK_EQ(0, pending_task_count_);
@@ -589,7 +589,7 @@ void ConcurrentMarking::ScheduleTasks() {
 }

 void ConcurrentMarking::RescheduleTasksIfNeeded() {
-  if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
+  if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
   {
     base::LockGuard<base::Mutex> guard(&pending_lock_);
     if (pending_task_count_ > 0) return;
......
@@ -229,7 +229,6 @@ Heap::Heap()
       heap_iterator_depth_(0),
       local_embedder_heap_tracer_(nullptr),
       fast_promotion_mode_(false),
-      use_tasks_(true),
       force_oom_(false),
       delay_sweeper_tasks_for_testing_(false),
       pending_layout_change_object_(nullptr),
@@ -5918,7 +5917,6 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
 void Heap::TearDown() {
   SetGCState(TEAR_DOWN);
-  DCHECK(!use_tasks_);
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
......
@@ -851,6 +851,7 @@ class Heap {
   inline HeapState gc_state() { return gc_state_; }
   void SetGCState(HeapState state);
+  bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }

   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -1013,10 +1014,6 @@ class Heap {
   // Returns whether SetUp has been called.
   bool HasBeenSetUp();

-  void stop_using_tasks() { use_tasks_ = false; }
-  bool use_tasks() const { return use_tasks_; }
-
   // ===========================================================================
   // Getters for spaces. =======================================================
   // ===========================================================================
@@ -2646,8 +2643,6 @@ class Heap {
   bool fast_promotion_mode_;

-  bool use_tasks_;
-
   // Used for testing purposes.
   bool force_oom_;
   bool delay_sweeper_tasks_for_testing_;
......
@@ -21,7 +21,7 @@ void IncrementalMarkingJob::Start(Heap* heap) {
 }

 void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
-  if (!task_pending_ && heap->use_tasks()) {
+  if (!task_pending_ && !heap->IsTearingDown()) {
     v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
     task_pending_ = true;
     auto task = new Task(heap->isolate(), this);
......
@@ -424,7 +424,7 @@ void IncrementalMarking::StartMarking() {
   IncrementalMarkingRootMarkingVisitor visitor(this);
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

-  if (FLAG_concurrent_marking && heap_->use_tasks()) {
+  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
     heap_->concurrent_marking()->ScheduleTasks();
   }
......
@@ -202,7 +202,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
 void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
   DCHECK_LT(0, delay_ms);
-  if (!heap()->use_tasks()) return;
+  if (heap()->IsTearingDown()) return;
   // Leave some room for precision error in task scheduler.
   const double kSlackMs = 100;
   v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
......
@@ -103,7 +103,7 @@ void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
 void ScavengeJob::ScheduleIdleTask(Heap* heap) {
-  if (!idle_task_pending_ && heap->use_tasks()) {
+  if (!idle_task_pending_ && !heap->IsTearingDown()) {
     v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
     if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
       idle_task_pending_ = true;
......
@@ -338,7 +338,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 };

 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
-  if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
+  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
     if (!MakeRoomForNewTasks()) {
       // kMaxUnmapperTasks are already running. Avoid creating any more.
       if (FLAG_trace_unmapper) {
......
@@ -2591,8 +2591,6 @@ void Isolate::ClearSerializerData() {
 void Isolate::Deinit() {
   TRACE_ISOLATE(deinit);

-  // Make sure that the GC does not post any new tasks.
-  heap_.stop_using_tasks();
-
   debug()->Unload();

   if (concurrent_recompilation_enabled()) {
......