Commit a33353a0 authored by Georgia Kouveli, committed by Commit Bot

[instruction scheduler] Fix issue with block terminators and deopts.

Remove IsBlockTerminator and introduce InstructionScheduler::AddTerminator in
order to handle block terminator instructions.

Instead of the kBlockTerminator flags, we now rely on Instruction::IsTrap(),
Instruction::IsDeoptimizeCall() and explicitly denoting block terminators
when adding them with InstructionScheduler::AddTerminator().

IsBlockTerminator incorrectly included deopts when they were not at the end of
a block, which meant that an instruction with side effects could have been
reordered with respect to a deopt as the deopt was not identified correctly.

Since the snapshot does not contain deopts, this is not causing any problems
at the moment (the scheduler is only enabled on the snapshot).

Change-Id: I1c2dad748a9398a3355630d9a542f4ac89afaa42
Reviewed-on: https://chromium-review.googlesource.com/960501
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Georgia Kouveli <georgia.kouveli@arm.com>
Cr-Commit-Position: refs/heads/master@{#52019}
parent 65c54c7a
......@@ -277,13 +277,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S1x8AllTrue:
case kArm64S1x16AnyTrue:
case kArm64S1x16AllTrue:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
return kIsBlockTerminator;
return kNoOpcodeFlags;
case kArm64LdrS:
case kArm64LdrD:
......
......@@ -114,17 +114,24 @@ void InstructionScheduler::EndBlock(RpoNumber rpo) {
operands_map_.clear();
}
// Appends a basic-block terminator to the scheduling graph. A terminator
// must remain the last instruction of its block, so every node already in
// the graph is made a predecessor of it (i.e. the terminator is added as a
// successor of each existing node).
void InstructionScheduler::AddTerminator(Instruction* instr) {
  ScheduleGraphNode* terminator = new (zone()) ScheduleGraphNode(zone(), instr);
  for (ScheduleGraphNode* predecessor : graph_) {
    predecessor->AddSuccessor(terminator);
  }
  graph_.push_back(terminator);
}
void InstructionScheduler::AddInstruction(Instruction* instr) {
ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
if (IsBlockTerminator(instr)) {
// Make sure that basic block terminators are not moved by adding them
// as successor of every instruction.
for (ScheduleGraphNode* node : graph_) {
node->AddSuccessor(new_node);
}
} else if (IsFixedRegisterParameter(instr)) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
......@@ -244,6 +251,12 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// reference to a frame slot, so it is not affected
// by the arm64 dual stack issues mentioned below.
case kArchComment:
case kArchDeoptimize:
case kArchJmp:
case kArchLookupSwitch:
case kArchRet:
case kArchTableSwitch:
case kArchThrowTerminator:
return kNoOpcodeFlags;
case kArchTruncateDoubleToI:
......@@ -283,23 +296,13 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchCallWasmFunction:
return kHasSideEffect;
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
return kHasSideEffect | kIsBlockTerminator;
case kArchDeoptimize:
case kArchJmp:
case kArchLookupSwitch:
case kArchTableSwitch:
case kArchRet:
case kArchDebugAbort:
case kArchDebugBreak:
case kArchThrowTerminator:
return kIsBlockTerminator;
return kHasSideEffect;
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
......@@ -362,14 +365,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
UNREACHABLE();
}
// Returns true if |instr| ends a basic block: either its opcode carries the
// kIsBlockTerminator flag, or its flags mode encodes a branch (including the
// poisoning branch variant).
// NOTE(review): this predicate is the one removed by this commit — per the
// commit message it wrongly classified deopts placed mid-block, allowing
// side-effecting instructions to be reordered across them.
bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
  return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
          (instr->flags_mode() == kFlags_branch) ||
          (instr->flags_mode() == kFlags_branch_and_poison));
}
void InstructionScheduler::ComputeTotalLatencies() {
for (ScheduleGraphNode* node : base::Reversed(graph_)) {
int max_latency = 0;
......
......@@ -16,12 +16,10 @@ namespace compiler {
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
kNoOpcodeFlags = 0,
kIsBlockTerminator = 1, // The instruction marks the end of a basic block
// e.g.: jump and return instructions.
kHasSideEffect = 2, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 4, // The instruction is a memory load.
kMayNeedDeoptOrTrapCheck = 8, // The instruction may be associated with a
kHasSideEffect = 1, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 2, // The instruction is a memory load.
kMayNeedDeoptOrTrapCheck = 4, // The instruction may be associated with a
// deopt or trap check which must be run before
// instruction e.g. div on Intel platform which
// will raise an exception when the divisor is
......@@ -36,6 +34,7 @@ class InstructionScheduler final : public ZoneObject {
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
void AddTerminator(Instruction* instr);
static bool SchedulerSupported();
......@@ -153,9 +152,6 @@ class InstructionScheduler final : public ZoneObject {
int GetInstructionFlags(const Instruction* instr) const;
int GetTargetInstructionFlags(const Instruction* instr) const;
// Return true if the instruction is a basic block terminator.
bool IsBlockTerminator(const Instruction* instr) const;
// Check whether the given instruction has side effects (e.g. function call,
// memory store).
bool HasSideEffect(const Instruction* instr) const {
......@@ -208,6 +204,8 @@ class InstructionScheduler final : public ZoneObject {
InstructionSequence* sequence_;
ZoneVector<ScheduleGraphNode*> graph_;
friend class InstructionSchedulerTester;
// Last side effect instruction encountered while building the graph.
ScheduleGraphNode* last_side_effect_instr_;
......
......@@ -96,9 +96,13 @@ bool InstructionSelector::SelectInstructions() {
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
UpdateRenames(instructions_[start]);
AddInstruction(instructions_[start]);
if (end != start) {
while (start-- > end + 1) {
UpdateRenames(instructions_[start]);
AddInstruction(instructions_[start]);
}
UpdateRenames(instructions_[end]);
AddTerminator(instructions_[end]);
}
EndBlock(RpoNumber::FromInt(block->rpo_number()));
}
......@@ -127,6 +131,14 @@ void InstructionSelector::EndBlock(RpoNumber rpo) {
}
}
// Emits a block terminator. Without scheduling it is appended directly to
// the instruction sequence; with scheduling it is routed through
// InstructionScheduler::AddTerminator so it stays last in its block.
void InstructionSelector::AddTerminator(Instruction* instr) {
  if (!UseInstructionScheduling()) {
    sequence()->AddInstruction(instr);
    return;
  }
  DCHECK_NOT_NULL(scheduler_);
  scheduler_->AddTerminator(instr);
}
void InstructionSelector::AddInstruction(Instruction* instr) {
if (UseInstructionScheduling()) {
......
......@@ -282,6 +282,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
void AddTerminator(Instruction* instr);
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
......
......@@ -58,6 +58,7 @@ v8_source_set("cctest_sources") {
"compiler/test-code-generator.cc",
"compiler/test-gap-resolver.cc",
"compiler/test-graph-visualizer.cc",
"compiler/test-instruction-scheduler.cc",
"compiler/test-instruction.cc",
"compiler/test-js-constant-cache.cc",
"compiler/test-js-context-specialization.cc",
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/instruction.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace compiler {
// Builds an InstructionBlocks container holding exactly one basic block
// (rpo number 0, invalid loop header/end markers, non-deferred, non-handler).
InstructionBlocks* CreateSingleBlock(Zone* zone) {
  InstructionBlock* single_block = new (zone)
      InstructionBlock(zone, RpoNumber::FromInt(0), RpoNumber::Invalid(),
                       RpoNumber::Invalid(), false, false);
  single_block->set_ao_number(RpoNumber::FromInt(0));
  InstructionBlocks* result = zone->NewArray<InstructionBlocks>(1);
  new (result) InstructionBlocks(1, nullptr, zone);
  (*result)[0] = single_block;
  return result;
}
// Test wrapper around InstructionScheduler. Owns a single-block
// InstructionSequence and forwards Start/End/Add calls to the scheduler;
// relies on being declared a friend of InstructionScheduler to inspect the
// private dependency graph (graph_).
class InstructionSchedulerTester {
 public:
  // Member-init order matters: the zone/isolate scope must exist before the
  // blocks, sequence, and scheduler that are built from it.
  InstructionSchedulerTester()
      : scope_(),
        blocks_(CreateSingleBlock(scope_.main_zone())),
        sequence_(scope_.main_isolate(), scope_.main_zone(), blocks_),
        scheduler_(scope_.main_zone(), &sequence_) {}
  // Open/close the single basic block (rpo 0) in the scheduler.
  void StartBlock() { scheduler_.StartBlock(RpoNumber::FromInt(0)); }
  void EndBlock() { scheduler_.EndBlock(RpoNumber::FromInt(0)); }
  // Forward an ordinary instruction / a block terminator to the scheduler.
  void AddInstruction(Instruction* instr) { scheduler_.AddInstruction(instr); }
  void AddTerminator(Instruction* instr) { scheduler_.AddTerminator(instr); }
  // Assert the scheduler classifies |instr| as having a side effect.
  void CheckHasSideEffect(Instruction* instr) {
    CHECK(scheduler_.HasSideEffect(instr));
  }
  // Assert |instr| encodes a deoptimization call.
  void CheckIsDeopt(Instruction* instr) { CHECK(instr->IsDeoptimizeCall()); }
  // Assert a scheduling edge instr -> successor exists, i.e. |successor|
  // cannot be moved before |instr|.
  void CheckInSuccessors(Instruction* instr, Instruction* successor) {
    InstructionScheduler::ScheduleGraphNode* node = GetNode(instr);
    InstructionScheduler::ScheduleGraphNode* succ_node = GetNode(successor);
    ZoneDeque<InstructionScheduler::ScheduleGraphNode*>& successors =
        node->successors();
    CHECK_NE(std::find(successors.begin(), successors.end(), succ_node),
             successors.end());
  }
  Zone* zone() { return scope_.main_zone(); }

 private:
  // Linear search of the scheduler's private graph for the node wrapping
  // |instr|; returns nullptr if the instruction was never added.
  InstructionScheduler::ScheduleGraphNode* GetNode(Instruction* instr) {
    for (auto node : scheduler_.graph_) {
      if (node->instruction() == instr) return node;
    }
    return nullptr;
  }
  HandleAndZoneScope scope_;
  InstructionBlocks* blocks_;
  InstructionSequence sequence_;
  InstructionScheduler scheduler_;
};
// Regression test for the bug fixed by this commit: a deopt that is NOT the
// last instruction of a block must still act as a scheduling barrier, so
// side-effecting instructions and later deopts cannot be reordered across it.
TEST(DeoptInMiddleOfBasicBlock) {
  InstructionSchedulerTester tester;
  Zone* zone = tester.zone();
  tester.StartBlock();
  InstructionCode jmp_opcode = kArchJmp;
  // Dummy node for FlagsContinuation::ForDeoptimize (which won't accept
  // nullptr).
  Node* node = Node::New(zone, 0, nullptr, 0, nullptr, false);
  VectorSlotPair feedback;
  // Encode a deoptimizing continuation onto the jump opcode so the resulting
  // instruction reports IsDeoptimizeCall() == true.
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, DeoptimizeKind::kEager, DeoptimizeReason::kUnknown, feedback,
      node, LoadPoisoning::kDoPoison);
  jmp_opcode = cont.Encode(jmp_opcode);
  // Block shape: deopt, side-effecting instruction, second deopt, then the
  // real terminator added via AddTerminator (not AddInstruction).
  Instruction* jmp_inst = Instruction::New(zone, jmp_opcode);
  tester.CheckIsDeopt(jmp_inst);
  tester.AddInstruction(jmp_inst);
  Instruction* side_effect_inst = Instruction::New(zone, kArchPrepareTailCall);
  tester.CheckHasSideEffect(side_effect_inst);
  tester.AddInstruction(side_effect_inst);
  Instruction* other_jmp_inst = Instruction::New(zone, jmp_opcode);
  tester.CheckIsDeopt(other_jmp_inst);
  tester.AddInstruction(other_jmp_inst);
  Instruction* ret_inst = Instruction::New(zone, kArchRet);
  tester.AddTerminator(ret_inst);
  // Check that an instruction with a side effect is a successor of the deopt.
  tester.CheckInSuccessors(jmp_inst, side_effect_inst);
  // Check that the second deopt is a successor of the first deopt.
  tester.CheckInSuccessors(jmp_inst, other_jmp_inst);
  // Check that the second deopt is a successor of the side-effect instruction.
  tester.CheckInSuccessors(side_effect_inst, other_jmp_inst);
  // Check that the block terminator is a successor of all other instructions.
  tester.CheckInSuccessors(jmp_inst, ret_inst);
  tester.CheckInSuccessors(side_effect_inst, ret_inst);
  tester.CheckInSuccessors(other_jmp_inst, ret_inst);
  // Schedule block.
  tester.EndBlock();
}
} // namespace compiler
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment