Commit a3a583db authored by epertoso, committed by Commit bot

[turbofan] Re-enables the jump table emission in the mips instruction selector.

Changes MacroAssembler::GenerateSwitchTable to make sure that 'ra' is properly restored.

BUG=

Review URL: https://codereview.chromium.org/1761863002

Cr-Commit-Position: refs/heads/master@{#34460}
parent 2689548e
...@@ -1216,27 +1216,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { ...@@ -1216,27 +1216,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this); MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// TODO(mips): TableSwitch is broken, as it messes with ra without saving it // Emit either ArchTableSwitch or ArchLookupSwitch.
// properly (which breaks with frame elision, i.e. inside stubs). size_t table_space_cost = 9 + sw.value_range;
if (false) { size_t table_time_cost = 3;
// Emit either ArchTableSwitch or ArchLookupSwitch. size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t table_space_cost = 9 + sw.value_range; size_t lookup_time_cost = sw.case_count;
size_t table_time_cost = 3; if (sw.case_count > 0 &&
size_t lookup_space_cost = 2 + 2 * sw.case_count; table_space_cost + 3 * table_time_cost <=
size_t lookup_time_cost = sw.case_count; lookup_space_cost + 3 * lookup_time_cost &&
if (sw.case_count > 0 && sw.min_value > std::numeric_limits<int32_t>::min()) {
table_space_cost + 3 * table_time_cost <= InstructionOperand index_operand = value_operand;
lookup_space_cost + 3 * lookup_time_cost && if (sw.min_value) {
sw.min_value > std::numeric_limits<int32_t>::min()) { index_operand = g.TempRegister();
InstructionOperand index_operand = value_operand; Emit(kMipsSub, index_operand, value_operand,
if (sw.min_value) { g.TempImmediate(sw.min_value));
index_operand = g.TempRegister();
Emit(kMipsSub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
} }
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
} }
// Generate a sequence of conditional jumps. // Generate a sequence of conditional jumps.
......
...@@ -1711,27 +1711,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { ...@@ -1711,27 +1711,23 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this); Mips64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// TODO(mips): TableSwitch is broken, as it messes with ra without saving it // Emit either ArchTableSwitch or ArchLookupSwitch.
// properly (which breaks with frame elision, i.e. inside stubs). size_t table_space_cost = 10 + 2 * sw.value_range;
if (false) { size_t table_time_cost = 3;
// Emit either ArchTableSwitch or ArchLookupSwitch. size_t lookup_space_cost = 2 + 2 * sw.case_count;
size_t table_space_cost = 10 + 2 * sw.value_range; size_t lookup_time_cost = sw.case_count;
size_t table_time_cost = 3; if (sw.case_count > 0 &&
size_t lookup_space_cost = 2 + 2 * sw.case_count; table_space_cost + 3 * table_time_cost <=
size_t lookup_time_cost = sw.case_count; lookup_space_cost + 3 * lookup_time_cost &&
if (sw.case_count > 0 && sw.min_value > std::numeric_limits<int32_t>::min()) {
table_space_cost + 3 * table_time_cost <= InstructionOperand index_operand = value_operand;
lookup_space_cost + 3 * lookup_time_cost && if (sw.min_value) {
sw.min_value > std::numeric_limits<int32_t>::min()) { index_operand = g.TempRegister();
InstructionOperand index_operand = value_operand; Emit(kMips64Sub, index_operand, value_operand,
if (sw.min_value) { g.TempImmediate(sw.min_value));
index_operand = g.TempRegister();
Emit(kMips64Sub, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
} }
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
} }
// Generate a sequence of conditional jumps. // Generate a sequence of conditional jumps.
......
...@@ -1778,12 +1778,14 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, ...@@ -1778,12 +1778,14 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
lw(at, MemOperand(at)); lw(at, MemOperand(at));
} else { } else {
Label here; Label here;
BlockTrampolinePoolFor(case_count + 6); BlockTrampolinePoolFor(case_count + 10);
push(ra);
bal(&here); bal(&here);
sll(at, index, kPointerSizeLog2); // Branch delay slot. sll(at, index, kPointerSizeLog2); // Branch delay slot.
bind(&here); bind(&here);
addu(at, at, ra); addu(at, at, ra);
lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize)); pop(ra);
lw(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
} }
jr(at); jr(at);
nop(); // Branch delay slot nop. nop(); // Branch delay slot nop.
......
...@@ -1941,13 +1941,15 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, ...@@ -1941,13 +1941,15 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
ld(at, MemOperand(at)); ld(at, MemOperand(at));
} else { } else {
Label here; Label here;
BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7); BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
Align(8); Align(8);
push(ra);
bal(&here); bal(&here);
dsll(at, index, kPointerSizeLog2); // Branch delay slot. dsll(at, index, kPointerSizeLog2); // Branch delay slot.
bind(&here); bind(&here);
daddu(at, at, ra); daddu(at, at, ra);
ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize)); pop(ra);
ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
} }
jr(at); jr(at);
nop(); // Branch delay slot nop. nop(); // Branch delay slot nop.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment