Commit d735f3ab authored by dusan.simicic, committed by Commit bot

MIPS: Fix trampoline emission after switch table generation

Trampolines are generated when the value of pc_offset is greater than
next_buffer_check_ (a member of the Assembler class). This value
should not be incremented in the bind_to() method when an internal
reference label is bound, because it is not decremented when the switch
table is generated (the dd() method of the Assembler class).

This patch fixes the problem. Regression tests are also included for the
mips and mips64 architectures.
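
The sketch below models the bookkeeping in question. It is a simplified
illustration, not the real v8::internal::Assembler: the member names mirror
the real ones, but the call sites and the starting value of
next_buffer_check_ are assumptions made only for this example.

// Toy model of the next_buffer_check_ bookkeeping described above.
#include <cstdio>
#include <set>

struct ToyAssembler {
  static const int kInstrSize = 4;
  static const int kTrampolineSlotsSize = 4 * kInstrSize;  // MIPS32 value

  // Assumed starting point: the 18-bit branch range minus headroom for 16
  // trampoline slots, mirroring kMaxOffsetForTrampolineStart in the test.
  int next_buffer_check_ = ((1 << 17) - 1) - 16 * kTrampolineSlotsSize;
  std::set<int> internal_reference_positions_;

  // Linking a branch to an unbound label reserves a future trampoline slot,
  // so the next pool check is pulled closer to the current pc.
  void LinkBranch() { next_buffer_check_ -= kTrampolineSlotsSize; }

  // dd(Label*) records an internal reference but never adjusts
  // next_buffer_check_; that asymmetry is what the patch accounts for.
  void LinkInternalReference(int pos) {
    internal_reference_positions_.insert(pos);
  }

  // bind_to(): undo the reservation only for labels that actually made one.
  void Bind(int pos) {
    if (internal_reference_positions_.count(pos) == 0) {
      next_buffer_check_ += kTrampolineSlotsSize;
    }
  }
};

int main() {
  ToyAssembler a;
  int before = a.next_buffer_check_;
  a.LinkInternalReference(100);  // switch-table entry emitted via dd()
  a.Bind(100);                   // binding it must not move the check point
  // Without the is_internal_reference() guard this delta would be +16,
  // postponing trampoline emission beyond the reachable branch range.
  std::printf("delta = %d\n", a.next_buffer_check_ - before);
  return 0;
}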

BUG=

Review-Url: https://codereview.chromium.org/2530143002
Cr-Commit-Position: refs/heads/master@{#41423}
parent 39d289f5
@@ -895,8 +895,7 @@ void Assembler::print(Label* L) {
       } else {
         PrintF("%d\n", instr);
       }
-      next(&l, internal_reference_positions_.find(l.pos()) !=
-                   internal_reference_positions_.end());
+      next(&l, is_internal_reference(&l));
     }
   } else {
     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -910,14 +909,15 @@ void Assembler::bind_to(Label* L, int pos) {
   bool is_internal = false;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
-    next_buffer_check_ += kTrampolineSlotsSize;
+    if (!is_internal_reference(L)) {
+      next_buffer_check_ += kTrampolineSlotsSize;
+    }
   }
   while (L->is_linked()) {
     int32_t fixup_pos = L->pos();
     int32_t dist = pos - fixup_pos;
-    is_internal = internal_reference_positions_.find(fixup_pos) !=
-                  internal_reference_positions_.end();
+    is_internal = is_internal_reference(L);
     next(L, is_internal);  // Call next before overwriting link with target at
                            // fixup_pos.
     Instr instr = instr_at(fixup_pos);
@@ -934,7 +934,6 @@ void Assembler::bind_to(Label* L, int pos) {
         CHECK((trampoline_pos - fixup_pos) <= branch_offset);
         target_at_put(fixup_pos, trampoline_pos, false);
         fixup_pos = trampoline_pos;
-        dist = pos - fixup_pos;
       }
       target_at_put(fixup_pos, pos, false);
     } else {
......
@@ -1452,6 +1452,10 @@ class Assembler : public AssemblerBase {
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int> internal_reference_positions_;
+  bool is_internal_reference(Label* L) {
+    return internal_reference_positions_.find(L->pos()) !=
+           internal_reference_positions_.end();
+  }
   void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
   void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
......
@@ -859,8 +859,7 @@ void Assembler::print(Label* L) {
       } else {
         PrintF("%d\n", instr);
       }
-      next(&l, internal_reference_positions_.find(l.pos()) !=
-                   internal_reference_positions_.end());
+      next(&l, is_internal_reference(&l));
     }
   } else {
     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -874,14 +873,15 @@ void Assembler::bind_to(Label* L, int pos) {
   bool is_internal = false;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
-    next_buffer_check_ += kTrampolineSlotsSize;
+    if (!is_internal_reference(L)) {
+      next_buffer_check_ += kTrampolineSlotsSize;
+    }
   }
   while (L->is_linked()) {
     int fixup_pos = L->pos();
     int dist = pos - fixup_pos;
-    is_internal = internal_reference_positions_.find(fixup_pos) !=
-                  internal_reference_positions_.end();
+    is_internal = is_internal_reference(L);
     next(L, is_internal);  // Call next before overwriting link with target at
                            // fixup_pos.
     Instr instr = instr_at(fixup_pos);
@@ -898,7 +898,6 @@ void Assembler::bind_to(Label* L, int pos) {
         CHECK((trampoline_pos - fixup_pos) <= branch_offset);
         target_at_put(fixup_pos, trampoline_pos, false);
         fixup_pos = trampoline_pos;
-        dist = pos - fixup_pos;
       }
       target_at_put(fixup_pos, pos, false);
     } else {
......
@@ -1503,6 +1503,10 @@ class Assembler : public AssemblerBase {
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int64_t> internal_reference_positions_;
+  bool is_internal_reference(Label* L) {
+    return internal_reference_positions_.find(L->pos()) !=
+           internal_reference_positions_.end();
+  }
   void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
   void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
......
@@ -282,6 +282,90 @@ TEST(jump_tables5) {
}
}
TEST(jump_tables6) {
// Similar to the jump_tables1 test in test-assembler-mips, with an extra
// check for the branch trampoline required after emission of the dd table
// (where trampolines are blocked). This test checks that the number of
// instructions actually generated is greater than the number counted from
// the code below, since a trampoline is expected to be emitted once the
// kFillInstr filler brings the branch distance close to 32K instructions.
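// Rough arithmetic behind the constants below: kMaxBranchOffset is
// (1 << 17) - 1 = 131071 bytes, i.e. about 32767 4-byte instructions.
// The 32551 filler instructions plus the switch table and the 40 case
// bodies push the distance from the first Branch past
// kMaxOffsetForTrampolineStart, so that forward branch can only reach
// its target through a trampoline.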
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 40;
const int kFillInstr = 32551;
const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
const int kTrampolineSlotsSize = 4 * Instruction::kInstrSize;
const int kMaxOffsetForTrampolineStart =
kMaxBranchOffset - 16 * kTrampolineSlotsSize;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label near_start, end, done;
__ Push(ra);
__ mov(v0, zero_reg);
int offs1 = masm->pc_offset();
int gen_insn = 0;
__ Branch(&end);
gen_insn += 2;
__ bind(&near_start);
// Generate slightly fewer than 32K instructions, which will soon require a
// trampoline for branch distance fixup.
for (int i = 0; i < kFillInstr; ++i) {
__ addiu(v0, v0, 1);
}
gen_insn += kFillInstr;
__ GenerateSwitchTable(a0, kNumCases,
[&labels](size_t i) { return labels + i; });
gen_insn += (10 + kNumCases);
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ li(v0, values[i]);
__ Branch(&done);
}
gen_insn += (4 * kNumCases);
// If the offset from here to the first branch instruction is greater than
// the maximum allowed offset for a trampoline start ...
CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1);
// ... then the number of generated instructions must be greater than
// "gen_insn", since we expect a trampoline to have been emitted.
CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / Instruction::kInstrSize);
__ bind(&done);
__ Pop(ra);
__ jr(ra);
__ nop();
__ bind(&end);
__ Branch(&near_start);
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int res =
reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
}
static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Isolate* isolate = CcTest::i_isolate();
......
@@ -350,6 +350,90 @@ TEST(jump_tables5) {
}
}
TEST(jump_tables6) {
// Similar to the jump_tables1 test in test-assembler-mips, with an extra
// check for the branch trampoline required after emission of the dd table
// (where trampolines are blocked). This test checks that the number of
// instructions actually generated is greater than the number counted from
// the code below, since a trampoline is expected to be emitted once the
// kFillInstr filler brings the branch distance close to 32K instructions.
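// Same strategy as the MIPS32 variant of this test. Here a trampoline slot
// is 2 instructions (see kTrampolineSlotsSize below), and the instruction
// count for the switch table is (11 + 2 * kNumCases), presumably because
// each 64-bit table entry occupies two 4-byte instruction slots.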
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 40;
const int kFillInstr = 32551;
const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
const int kTrampolineSlotsSize = 2 * Instruction::kInstrSize;
const int kMaxOffsetForTrampolineStart =
kMaxBranchOffset - 16 * kTrampolineSlotsSize;
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
Label near_start, end, done;
__ Push(ra);
__ mov(v0, zero_reg);
int offs1 = masm->pc_offset();
int gen_insn = 0;
__ Branch(&end);
gen_insn += 2;
__ bind(&near_start);
// Generate slightly fewer than 32K instructions, which will soon require a
// trampoline for branch distance fixup.
for (int i = 0; i < kFillInstr; ++i) {
__ addiu(v0, v0, 1);
}
gen_insn += kFillInstr;
__ GenerateSwitchTable(a0, kNumCases,
[&labels](size_t i) { return labels + i; });
gen_insn += (11 + 2 * kNumCases);
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
__ li(v0, values[i]);
__ Branch(&done);
}
gen_insn += (4 * kNumCases);
// If the offset from here to the first branch instruction is greater than
// the maximum allowed offset for a trampoline start ...
CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1);
// ... then the number of generated instructions must be greater than
// "gen_insn", since we expect a trampoline to have been emitted.
CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / Instruction::kInstrSize);
__ bind(&done);
__ Pop(ra);
__ jr(ra);
__ nop();
__ bind(&end);
__ Branch(&near_start);
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(
CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
}
static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
Isolate* isolate = CcTest::i_isolate();
......