Commit ea97572d authored by JialuZhang-intel, committed by V8 LUCI CQ

[turbofan] Improve jump threading phase

Enable the jump threading phase to handle jump instructions that carry gap moves.
Record the first occurrence of each jump-with-gap-moves instruction and forward
subsequent identical jumps to the block containing the recorded one.

For example:
  In this case, we merge the second instruction into the first one,
  because those two gap jump instructions have the same gap moves.

    -- Before jump threading phase:
    B0:
    1. gap(rdx=rbx)
       ArchJmp imm:3
    B1:
    2. gap(rdx=rbx)
       ArchJmp imm:3

    -- After jump threading phase:
    B0:
    1. gap(rdx=rbx)
       ArchJmp imm:3
    B1:
    2. ArchNop

This can eliminate redundant jump and move instructions.

Design doc: https://docs.google.com/document/d/1SpO7Kw4e6CnCesFT118MUnCufUHZDy3QaVSymcci5jE/edit?usp=sharing

Change-Id: Ie94c8f63e2f758824619f6ed9513cbdff00186c4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3858528
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Jialu Zhang <jialu.zhang@intel.com>
Cr-Commit-Position: refs/heads/main@{#83288}
parent f6a61372
......@@ -325,6 +325,20 @@ void ParallelMove::PrepareInsertAfter(
if (replacement != nullptr) move->set_source(replacement->source());
}
bool ParallelMove::Equals(const ParallelMove& that) const {
if (this->size() != that.size()) return false;
for (size_t i = 0; i < this->size(); ++i) {
if (!(*this)[i]->Equals(*that[i])) return false;
}
return true;
}
void ParallelMove::Eliminate() {
for (MoveOperands* move : *this) {
move->Eliminate();
}
}
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
......
......@@ -775,6 +775,12 @@ class V8_EXPORT_PRIVATE MoveOperands final
// APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
// Compares this move with |that| for equality. Any two redundant moves
// compare equal, regardless of their operands.
bool Equals(const MoveOperands& that) const {
  if (IsRedundant() && that.IsRedundant()) return true;
  if (!source_.Equals(that.source_)) return false;
  return destination_.Equals(that.destination_);
}
private:
InstructionOperand source_;
InstructionOperand destination_;
......@@ -813,6 +819,11 @@ class V8_EXPORT_PRIVATE ParallelMove final
// to_eliminate must be Eliminated.
void PrepareInsertAfter(MoveOperands* move,
ZoneVector<MoveOperands*>* to_eliminate) const;
bool Equals(const ParallelMove& that) const;
// Eliminate all the MoveOperands in this ParallelMove.
void Eliminate();
};
std::ostream& operator<<(std::ostream&, const ParallelMove&);
......
......@@ -55,6 +55,72 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
struct GapJumpRecord {
GapJumpRecord(Zone* zone) : zone_(zone), gap_jump_records_(zone) {}
struct Record {
RpoNumber block;
Instruction* instr;
};
struct RpoNumberHash {
std::size_t operator()(const RpoNumber& key) const {
return std::hash<int>()(key.ToInt());
}
};
bool CanForwardGapJump(Instruction* instr, RpoNumber instr_block,
RpoNumber target_block, RpoNumber* forward_to) {
DCHECK_EQ(instr->arch_opcode(), kArchJmp);
bool can_forward = false;
auto search = gap_jump_records_.find(target_block);
if (search != gap_jump_records_.end()) {
for (Record& record : search->second) {
Instruction* record_instr = record.instr;
DCHECK_EQ(record_instr->arch_opcode(), kArchJmp);
bool is_same_instr = true;
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition pos =
static_cast<Instruction::GapPosition>(i);
ParallelMove* record_move = record_instr->GetParallelMove(pos);
ParallelMove* instr_move = instr->GetParallelMove(pos);
if (record_move == nullptr && instr_move == nullptr) continue;
if (((record_move == nullptr) != (instr_move == nullptr)) ||
!record_move->Equals(*instr_move)) {
is_same_instr = false;
break;
}
}
if (is_same_instr) {
// Found an instruction same as the recorded one.
*forward_to = record.block;
can_forward = true;
break;
}
}
if (!can_forward) {
// No recorded instruction has been found for this target block,
// so create a new record with the given instruction.
search->second.push_back({instr_block, instr});
}
} else {
// This is the first explored gap jump to target block.
auto ins =
gap_jump_records_.insert({target_block, ZoneVector<Record>(zone_)});
if (ins.second) {
ins.first->second.reserve(4);
ins.first->second.push_back({instr_block, instr});
}
}
return can_forward;
}
Zone* zone_;
ZoneUnorderedMap<RpoNumber, ZoneVector<Record>, RpoNumberHash>
gap_jump_records_;
};
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
......@@ -68,6 +134,7 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
int32_t empty_deconstruct_frame_return_size;
RpoNumber empty_no_deconstruct_frame_return_block = RpoNumber::Invalid();
int32_t empty_no_deconstruct_frame_return_size;
GapJumpRecord record(local_zone);
// Iterate over the blocks forward, pushing the blocks onto the stack.
for (auto const instruction_block : code->instruction_blocks()) {
......@@ -85,8 +152,24 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
if (!instr->AreMovesRedundant()) {
// can't skip instructions with non redundant moves.
TRACE(" parallel move\n");
TRACE(" parallel move");
// can't skip instructions with non redundant moves, except when we
// can forward to a block with identical gap-moves.
if (instr->arch_opcode() == kArchJmp) {
TRACE(" jmp");
RpoNumber forward_to;
if ((frame_at_start || !(block->must_deconstruct_frame() ||
block->must_construct_frame())) &&
record.CanForwardGapJump(instr, block->rpo_number(),
code->InputRpo(instr, 0),
&forward_to)) {
DCHECK(forward_to.IsValid());
fw = forward_to;
TRACE("\n merge B%d into B%d", block->rpo_number().ToInt(),
forward_to.ToInt());
}
}
TRACE("\n");
fallthru = false;
} else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
// can't skip instructions with flags continuations.
......@@ -217,6 +300,16 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
// Overwrite a redundant jump with a nop.
TRACE("jt-fw nop @%d\n", i);
instr->OverwriteWithNop();
// Eliminate all the ParallelMoves.
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition pos =
static_cast<Instruction::GapPosition>(i);
ParallelMove* instr_move = instr->GetParallelMove(pos);
if (instr_move != nullptr) {
instr_move->Eliminate();
}
}
// If this block was marked as a handler, it can be unmarked now.
code->InstructionBlockAt(block_rpo)->UnmarkHandler();
}
......
......@@ -82,6 +82,21 @@ class TestCode : public HandleAndZoneScope {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kWord32, 11));
}
int JumpWithGapMove(int target, int id = 10) {
Start();
InstructionOperand ops[] = {UseRpo(target)};
sequence_.AddInstruction(Instruction::New(main_zone(), kArchJmp, 0, nullptr,
1, ops, 0, nullptr));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
InstructionOperand from = AllocatedOperand(
LocationOperand::REGISTER, MachineRepresentation::kWord32, id);
InstructionOperand to = AllocatedOperand(
LocationOperand::REGISTER, MachineRepresentation::kWord32, id + 1);
AddGapMove(index, from, to);
End();
return index;
}
void Other() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), 155));
......@@ -228,6 +243,45 @@ TEST(FwMoves2b) {
VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwMoves3a) {
  constexpr size_t kBlockCount = 4;
  TestCode code(kBlockCount);

  // B0: gap jump to B3 moving r10 -> r11.
  code.JumpWithGapMove(3, 10);
  // B1: identical gap jump to B0's, so B1 merges into B0.
  code.JumpWithGapMove(3, 10);
  // B2: same target but different gap moves (r11 -> r12); cannot merge.
  code.JumpWithGapMove(3, 11);
  // B3
  code.End();

  static int expected_forwarding[] = {0, 0, 2, 3};
  VerifyForwarding(&code, kBlockCount, expected_forwarding);
}
TEST(FwMoves3b) {
  constexpr size_t kBlockCount = 7;
  TestCode code(kBlockCount);

  // B0 and B3 hold identical gap jumps to B6; the plain jumps in between
  // chain into them, so every block before B6 forwards to B0.
  code.JumpWithGapMove(6);  // B0
  code.Jump(2);             // B1
  code.Jump(3);             // B2
  code.JumpWithGapMove(6);  // B3
  code.Jump(3);             // B4
  code.Jump(2);             // B5
  code.End();               // B6

  static int expected_forwarding[] = {0, 0, 0, 0, 0, 0, 6};
  VerifyForwarding(&code, kBlockCount, expected_forwarding);
}
TEST(FwOther2) {
constexpr size_t kBlockCount = 2;
......@@ -463,6 +517,35 @@ TEST(FwLoop3_1a) {
VerifyForwarding(&code, kBlockCount, expected);
}
TEST(FwLoop4a) {
  constexpr size_t kBlockCount = 2;
  TestCode code(kBlockCount);

  // Two gap jumps forming a cycle: neither block is forwarded.
  code.JumpWithGapMove(1);  // B0
  code.JumpWithGapMove(0);  // B1

  static int expected_forwarding[] = {0, 1};
  VerifyForwarding(&code, kBlockCount, expected_forwarding);
}
TEST(FwLoop4b) {
  constexpr size_t kBlockCount = 4;
  TestCode code(kBlockCount);

  code.Jump(3);             // B0
  code.JumpWithGapMove(2);  // B1
  code.Jump(0);             // B2
  code.JumpWithGapMove(2);  // B3

  // All four blocks collapse onto B3.
  static int expected_forwarding[] = {3, 3, 3, 3};
  VerifyForwarding(&code, kBlockCount, expected_forwarding);
}
TEST(FwDiamonds) {
constexpr size_t kBlockCount = 4;
......@@ -925,6 +1008,61 @@ TEST(DifferentSizeRet) {
CheckRet(&code, j2);
}
TEST(RewireGapJump1) {
  constexpr size_t kBlockCount = 4;
  TestCode code(kBlockCount);

  // B0, B1 and B2 all carry the same gap jump to B3, so B1 and B2 merge
  // into B0 and their jumps are rewritten to nops.
  int jump0 = code.JumpWithGapMove(3);  // B0
  int jump1 = code.JumpWithGapMove(3);  // B1
  int jump2 = code.JumpWithGapMove(3);  // B2
  code.End();                           // B3

  int forward[] = {0, 0, 0, 3};
  VerifyForwarding(&code, kBlockCount, forward);
  ApplyForwarding(&code, kBlockCount, forward);

  CheckJump(&code, jump0, 3);
  CheckNop(&code, jump1);
  CheckNop(&code, jump2);
  static int assembly[] = {0, 1, 1, 1};
  CheckAssemblyOrder(&code, kBlockCount, assembly);
}
TEST(RewireGapJump2) {
  constexpr size_t kBlockCount = 6;
  TestCode code(kBlockCount);

  int jump0 = code.JumpWithGapMove(4);  // B0
  int jump1 = code.JumpWithGapMove(4);  // B1: same gap jump as B0.
  code.Other();                         // B2
  int jump2 = code.Jump(3);
  int jump3 = code.Jump(1);             // B3
  int jump4 = code.Jump(5);             // B4
  code.End();                           // B5

  int forward[] = {0, 0, 2, 0, 5, 5};
  VerifyForwarding(&code, kBlockCount, forward);
  ApplyForwarding(&code, kBlockCount, forward);

  CheckJump(&code, jump0, 5);
  CheckNop(&code, jump1);
  CheckJump(&code, jump2, 0);
  CheckNop(&code, jump3);
  CheckNop(&code, jump4);
  static int assembly[] = {0, 1, 1, 2, 2, 2};
  CheckAssemblyOrder(&code, kBlockCount, assembly);
}
} // namespace compiler
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment