Commit 97088422 authored by Santiago Aboy Solanes, committed by Commit Bot

[compiler][heap] Add safepointing to TickCounter's tick method

This gives the GC a location where it can interrupt background
compilation.

Bug: v8:7790
Change-Id: I8cf40e9c9b69c00fdfd5d59ed87e83137e481fb3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2310366
Commit-Queue: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69109}
parent e9a37bf8
@@ -6,18 +6,29 @@
 #include "src/base/logging.h"
 #include "src/base/macros.h"
+#include "src/heap/local-heap.h"

 namespace v8 {
 namespace internal {

-void TickCounter::DoTick() {
+void TickCounter::TickAndMaybeEnterSafepoint() {
   ++ticks_;
   // Magical number to detect performance bugs or compiler divergence.
   // Selected as being roughly 10x of what's needed frequently.
   constexpr size_t kMaxTicks = 100000000;
   USE(kMaxTicks);
   DCHECK_LT(ticks_, kMaxTicks);
+  if (local_heap_) local_heap_->Safepoint();
+}
+
+void TickCounter::AttachLocalHeap(LocalHeap* local_heap) {
+  DCHECK_NULL(local_heap_);
+  local_heap_ = local_heap;
+  DCHECK_NOT_NULL(local_heap_);
 }

+void TickCounter::DetachLocalHeap() { local_heap_ = nullptr; }
+
 }  // namespace internal
 }  // namespace v8
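
For readers unfamiliar with cooperative safepoints, the following self-contained sketch illustrates the idea behind TickAndMaybeEnterSafepoint: a background thread periodically calls into a tick, and the tick parks the thread while the GC holds a safepoint. SimpleSafepoint and SketchTickCounter below are invented stand-ins assuming a condition-variable park/resume protocol; they are not V8's LocalHeap implementation.

    #include <atomic>
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    // Hypothetical stand-in for a per-thread heap handle with a Safepoint() hook.
    class SimpleSafepoint {
     public:
      // Called by the GC thread: ask compilation threads to pause.
      void RequestSafepoint() { requested_.store(true, std::memory_order_relaxed); }

      // Called by the GC thread once its work is done: let threads continue.
      void Resume() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          requested_.store(false, std::memory_order_relaxed);
        }
        resume_.notify_all();
      }

      // Called by the compilation thread: park here if the GC asked us to.
      void Safepoint() {
        if (!requested_.load(std::memory_order_relaxed)) return;  // fast path
        std::unique_lock<std::mutex> lock(mutex_);
        resume_.wait(lock, [this] {
          return !requested_.load(std::memory_order_relaxed);
        });
      }

     private:
      std::atomic<bool> requested_{false};
      std::mutex mutex_;
      std::condition_variable resume_;
    };

    // Sketch of the TickCounter pattern above: every tick is also a safepoint check.
    class SketchTickCounter {
     public:
      void TickAndMaybeEnterSafepoint() {
        ++ticks_;
        if (safepoint_ != nullptr) safepoint_->Safepoint();
      }
      void Attach(SimpleSafepoint* s) { safepoint_ = s; }
      void Detach() { safepoint_ = nullptr; }
      size_t CurrentTicks() const { return ticks_; }

     private:
      size_t ticks_ = 0;
      SimpleSafepoint* safepoint_ = nullptr;
    };

The sketch only mirrors the general shape: a cheap check on the common path, and a blocking wait when the GC has asked the thread to pause. In V8 that protocol lives behind LocalHeap::Safepoint().
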
@@ -10,16 +10,23 @@
 namespace v8 {
 namespace internal {

-// A deterministic correlate of time, used to detect performance or
-// divergence bugs in Turbofan. DoTick() should be called frequently
-// throughout the compilation.
+class LocalHeap;
+
+// This method generates a tick. It also makes the current thread enter a
+// safepoint if one was requested. The tick is used as a deterministic
+// correlate of time to detect performance or divergence bugs in Turbofan.
+// TickAndMaybeEnterSafepoint() should be called frequently throughout the
+// compilation.
 class TickCounter {
  public:
-  void DoTick();
+  void TickAndMaybeEnterSafepoint();
+  void AttachLocalHeap(LocalHeap* local_heap);
+  void DetachLocalHeap();
   size_t CurrentTicks() const { return ticks_; }

  private:
   size_t ticks_ = 0;
+  LocalHeap* local_heap_ = nullptr;
 };

 }  // namespace internal
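
As a usage sketch of the new interface: a background compilation phase attaches the LocalHeap once, ticks inside its hot loop, and detaches before the job finishes. The phase driver below (RunHypotheticalPhase and its work items) is invented for illustration and assumes the TickCounter and LocalHeap declarations above are visible; only the three TickCounter calls come from the header.

    #include <vector>

    namespace v8 {
    namespace internal {

    // Hypothetical driver for a long-running background compilation phase.
    void RunHypotheticalPhase(TickCounter* tick_counter, LocalHeap* local_heap,
                              const std::vector<int>& work_items) {
      tick_counter->AttachLocalHeap(local_heap);  // once, when the job starts
      for (int item : work_items) {
        // Every iteration is now also a potential GC interruption point.
        tick_counter->TickAndMaybeEnterSafepoint();
        // ... per-item compilation work would go here ...
      }
      tick_counter->DetachLocalHeap();  // before results are handed back
    }

    }  // namespace internal
    }  // namespace v8
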
@@ -1293,7 +1293,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
 }

 void InstructionSelector::VisitNode(Node* node) {
-  tick_counter_->DoTick();
+  tick_counter_->TickAndMaybeEnterSafepoint();
   DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
   switch (node->opcode()) {
     case IrOpcode::kStart:
@@ -1491,7 +1491,7 @@ MidTierRegisterAllocator::~MidTierRegisterAllocator() = default;
 void MidTierRegisterAllocator::DefineOutputs() {
   for (const InstructionBlock* block :
        base::Reversed(code()->instruction_blocks())) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     InitializeBlockState(block);
     DefineOutputs(block);
@@ -1569,7 +1569,7 @@ void MidTierRegisterAllocator::DefineOutputs(const InstructionBlock* block) {
 void MidTierRegisterAllocator::AllocateRegisters() {
   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     AllocateRegisters(block);
   }
@@ -1836,7 +1836,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
 void ConstraintBuilder::MeetRegisterConstraints() {
   for (InstructionBlock* block : code()->instruction_blocks()) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     MeetRegisterConstraints(block);
   }
 }
@@ -2002,7 +2002,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
 void ConstraintBuilder::ResolvePhis() {
   // Process the blocks in reverse order.
   for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     ResolvePhis(block);
   }
 }
@@ -2617,7 +2617,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
   // Process the blocks in reverse order.
   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
        --block_id) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     InstructionBlock* block =
         code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
     BitVector* live = ComputeLiveOut(block, data());
@@ -2637,7 +2637,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
   // Postprocess the ranges.
   const size_t live_ranges_size = data()->live_ranges().size();
   for (TopLevelLiveRange* range : data()->live_ranges()) {
-    data_->tick_counter()->DoTick();
+    data_->tick_counter()->TickAndMaybeEnterSafepoint();
     CHECK_EQ(live_ranges_size,
              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
     if (range == nullptr) continue;
@@ -3784,7 +3784,7 @@ void LinearScanAllocator::AllocateRegisters() {
   while (!unhandled_live_ranges().empty() ||
          (data()->is_turbo_control_flow_aware_allocation() &&
           last_block < max_blocks)) {
-    data()->tick_counter()->DoTick();
+    data()->tick_counter()->TickAndMaybeEnterSafepoint();
     LiveRange* current = unhandled_live_ranges().empty()
                              ? nullptr
                              : *unhandled_live_ranges().begin();
@@ -4771,7 +4771,7 @@ OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
 void OperandAssigner::DecideSpillingMode() {
   if (data()->is_turbo_control_flow_aware_allocation()) {
     for (auto range : data()->live_ranges()) {
-      data()->tick_counter()->DoTick();
+      data()->tick_counter()->TickAndMaybeEnterSafepoint();
       int max_blocks = data()->code()->InstructionBlockCount();
       if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
         // If the range is spilled only in deferred blocks and starts in
@@ -4800,7 +4800,7 @@ void OperandAssigner::DecideSpillingMode() {
 void OperandAssigner::AssignSpillSlots() {
   for (auto range : data()->live_ranges()) {
-    data()->tick_counter()->DoTick();
+    data()->tick_counter()->TickAndMaybeEnterSafepoint();
     if (range != nullptr && range->get_bundle() != nullptr) {
       range->get_bundle()->MergeSpillRanges();
     }
@@ -4808,7 +4808,7 @@ void OperandAssigner::AssignSpillSlots() {
   ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
   // Merge disjoint spill ranges
   for (size_t i = 0; i < spill_ranges.size(); ++i) {
-    data()->tick_counter()->DoTick();
+    data()->tick_counter()->TickAndMaybeEnterSafepoint();
     SpillRange* range = spill_ranges[i];
     if (range == nullptr) continue;
     if (range->IsEmpty()) continue;
@@ -4821,7 +4821,7 @@ void OperandAssigner::AssignSpillSlots() {
   }
   // Allocate slots for the merged spill ranges.
   for (SpillRange* range : spill_ranges) {
-    data()->tick_counter()->DoTick();
+    data()->tick_counter()->TickAndMaybeEnterSafepoint();
     if (range == nullptr || range->IsEmpty()) continue;
     // Allocate a new operand referring to the spill slot.
     if (!range->HasSlot()) {
@@ -4834,7 +4834,7 @@ void OperandAssigner::AssignSpillSlots() {
 void OperandAssigner::CommitAssignment() {
   const size_t live_ranges_size = data()->live_ranges().size();
   for (TopLevelLiveRange* top_range : data()->live_ranges()) {
-    data()->tick_counter()->DoTick();
+    data()->tick_counter()->TickAndMaybeEnterSafepoint();
     CHECK_EQ(live_ranges_size,
              data()->live_ranges().size());  // TODO(neis): crbug.com/831822
     if (top_range == nullptr || top_range->IsEmpty()) continue;
@@ -5035,7 +5035,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
     BitVector* live = live_in_sets[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      data()->tick_counter()->DoTick();
+      data()->tick_counter()->TickAndMaybeEnterSafepoint();
       int vreg = iterator.Current();
       LiveRangeBoundArray* array = finder.ArrayFor(vreg);
       for (const RpoNumber& pred : block->predecessors()) {
@@ -1385,7 +1385,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
 }

 void BytecodeGraphBuilder::VisitSingleBytecode() {
-  tick_counter_->DoTick();
+  tick_counter_->TickAndMaybeEnterSafepoint();
   int current_offset = bytecode_iterator().current_offset();
   UpdateSourcePosition(current_offset);
   ExitThenEnterExceptionHandlers(current_offset);
@@ -30,7 +30,7 @@ ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
 void ControlFlowOptimizer::Optimize() {
   Enqueue(graph()->start());
   while (!queue_.empty()) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     Node* node = queue_.front();
     queue_.pop();
     if (node->IsDead()) continue;
@@ -297,7 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) {
   DCHECK(stack_.empty());
   stack_.push({node, 0});
   while (!stack_.empty()) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     Node* current = stack_.top().node;
     int& input_index = stack_.top().input_index;
     if (input_index < current->InputCount()) {
@@ -412,7 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
   State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
   State result = first_input;
   for (std::pair<Variable, Node*> var_value : first_input) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     if (Node* value = var_value.second) {
       Variable var = var_value.first;
       TRACE("var %i:\n", var.id_);
@@ -85,7 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
   auto skip = reducers_.end();
   for (auto i = reducers_.begin(); i != reducers_.end();) {
     if (i != skip) {
-      tick_counter_->DoTick();
+      tick_counter_->TickAndMaybeEnterSafepoint();
       Reduction reduction = (*i)->Reduce(node);
       if (!reduction.Changed()) {
         // No change from this reducer.
@@ -220,6 +220,10 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
     return ph_->NewHandle(*obj);
   }

+  LocalHeap* local_heap() {
+    return local_heap_.has_value() ? &(*local_heap_) : nullptr;
+  }
+
   std::string Trace() const;
   void IncrementTracingIndentation();
   void DecrementTracingIndentation();
@@ -190,7 +190,7 @@ class LoopFinderImpl {
     Queue(end_);
     while (!queue_.empty()) {
-      tick_counter_->DoTick();
+      tick_counter_->TickAndMaybeEnterSafepoint();
       Node* node = queue_.front();
       info(node);
       queue_.pop_front();
@@ -309,7 +309,7 @@ class LoopFinderImpl {
   }
   // Propagate forward on paths that were backward reachable from backedges.
   while (!queue_.empty()) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     Node* node = queue_.front();
     queue_.pop_front();
     queued_.Set(node, false);
@@ -203,7 +203,7 @@ void MemoryOptimizer::Optimize() {
 }

 void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
-  tick_counter_->DoTick();
+  tick_counter_->TickAndMaybeEnterSafepoint();
   DCHECK(!node->IsDead());
   DCHECK_LT(0, node->op()->EffectInputCount());
   switch (node->opcode()) {
@@ -932,14 +932,20 @@ class PipelineRunScope {
 // LocalHeapScope encapsulates the liveness of the broker's LocalHeap.
 class LocalHeapScope {
  public:
-  explicit LocalHeapScope(JSHeapBroker* broker) : broker_(broker) {
+  explicit LocalHeapScope(JSHeapBroker* broker, OptimizedCompilationInfo* info)
+      : broker_(broker), tick_counter_(&info->tick_counter()) {
     broker_->InitializeLocalHeap();
+    tick_counter_->AttachLocalHeap(broker_->local_heap());
   }

-  ~LocalHeapScope() { broker_->TearDownLocalHeap(); }
+  ~LocalHeapScope() {
+    tick_counter_->DetachLocalHeap();
+    broker_->TearDownLocalHeap();
+  }

  private:
   JSHeapBroker* broker_;
+  TickCounter* tick_counter_;
 };

 PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
@@ -1173,7 +1179,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
   // Ensure that the RuntimeCallStats table is only available during execution
   // and not during finalization as that might be on a different thread.
   PipelineJobScope scope(&data_, stats);
-  LocalHeapScope local_heap_scope(data_.broker());
+  LocalHeapScope local_heap_scope(data_.broker(), data_.info());
   if (data_.broker()->is_concurrent_inlining()) {
     if (!pipeline_.CreateGraph()) {
       return AbortOptimization(BailoutReason::kGraphBuildingFailed);
@@ -2994,7 +3000,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
   Deoptimizer::EnsureCodeForDeoptimizationEntries(isolate);

   pipeline.Serialize();
-  LocalHeapScope local_heap_scope(data.broker());
+  LocalHeapScope local_heap_scope(data.broker(), data.info());
   if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
   if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
   pipeline.AssembleCode(&linkage);
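
A note on the design: the pipeline threads the tick counter through the existing LocalHeapScope rather than introducing a second scope object, so attach/detach is guaranteed to bracket InitializeLocalHeap/TearDownLocalHeap even on early returns. Below is a minimal sketch of that RAII pairing; FakeBroker and FakeTickCounter are invented stand-ins for JSHeapBroker and the tick counter held by OptimizedCompilationInfo, not V8's actual types.

    #include <cassert>

    // Hypothetical stand-ins for the broker and compilation-info tick counter.
    struct FakeLocalHeap {};
    struct FakeBroker {
      void InitializeLocalHeap() { initialized = true; }
      void TearDownLocalHeap() { initialized = false; }
      FakeLocalHeap* local_heap() { return initialized ? &heap : nullptr; }
      FakeLocalHeap heap;
      bool initialized = false;
    };
    struct FakeTickCounter {
      void AttachLocalHeap(FakeLocalHeap* h) {
        assert(heap == nullptr);
        heap = h;
      }
      void DetachLocalHeap() { heap = nullptr; }
      FakeLocalHeap* heap = nullptr;
    };

    // Mirrors the constructor/destructor pairing in LocalHeapScope above.
    class ScopeSketch {
     public:
      ScopeSketch(FakeBroker* broker, FakeTickCounter* tick_counter)
          : broker_(broker), tick_counter_(tick_counter) {
        broker_->InitializeLocalHeap();
        tick_counter_->AttachLocalHeap(broker_->local_heap());
      }
      ~ScopeSketch() {
        tick_counter_->DetachLocalHeap();  // detach before tearing down
        broker_->TearDownLocalHeap();
      }

     private:
      FakeBroker* broker_;
      FakeTickCounter* tick_counter_;
    };

Detaching in the destructor before tearing down the LocalHeap, in the reverse order of the constructor, keeps the tick counter from ever holding a dangling pointer.
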
@@ -265,7 +265,7 @@ class CFGBuilder : public ZoneObject {
     Queue(scheduler_->graph_->end());
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
-      scheduler_->tick_counter_->DoTick();
+      scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
       Node* node = queue_.front();
       queue_.pop();
       int max = NodeProperties::PastControlIndex(node);
@@ -291,7 +291,7 @@ class CFGBuilder : public ZoneObject {
     component_end_ = schedule_->block(exit);
     scheduler_->equivalence_->Run(exit);
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
-      scheduler_->tick_counter_->DoTick();
+      scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
       Node* node = queue_.front();
       queue_.pop();
@@ -1274,7 +1274,7 @@ void Scheduler::PrepareUses() {
     visited[node->id()] = true;
     stack.push(node->input_edges().begin());
     while (!stack.empty()) {
-      tick_counter_->DoTick();
+      tick_counter_->TickAndMaybeEnterSafepoint();
       Edge edge = *stack.top();
       Node* node = edge.to();
       if (visited[node->id()]) {
@@ -1303,7 +1303,7 @@ class ScheduleEarlyNodeVisitor {
     for (Node* const root : *roots) {
      queue_.push(root);
      while (!queue_.empty()) {
-        scheduler_->tick_counter_->DoTick();
+        scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
        VisitNode(queue_.front());
        queue_.pop();
      }
@@ -1430,7 +1430,7 @@ class ScheduleLateNodeVisitor {
     queue->push(node);
     do {
-      scheduler_->tick_counter_->DoTick();
+      scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
       Node* const node = queue->front();
       queue->pop();
       VisitNode(node);
@@ -1808,7 +1808,7 @@ class RepresentationSelector {
   template <Phase T>
   void VisitNode(Node* node, Truncation truncation,
                  SimplifiedLowering* lowering) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     // Unconditionally eliminate unused pure nodes (only relevant if there's
     // a pure operation in between two effectful ones, where the last one
@@ -243,7 +243,7 @@ void RedundantStoreFinder::Find() {
   Visit(jsgraph()->graph()->end());
   while (!revisit_.empty()) {
-    tick_counter_->DoTick();
+    tick_counter_->TickAndMaybeEnterSafepoint();
     Node* next = revisit_.top();
     revisit_.pop();
     DCHECK_LT(next->id(), in_revisit_.size());