Commit 5ba6f2b0 authored by Predrag Rudic, committed by Commit Bot

MIPS[64] Make BranchLong PIC

In order to enable PIC code in builtins we need BranchLong to be position
independent.
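Rather than loading an absolute code address with a relocated lui/ori pair, the long branch now computes its target relative to the current pc: a bal to the next bound label deposits the address of that label in ra, and a 32-bit offset built with lui/ori is added to it. The sketch below (illustrative only; the hypothetical helper wraps the same Assembler calls that TurboAssembler::BranchLong uses later in this patch, and byte offsets assume kInstrSize == 4) shows the pre-r6 MIPS32 shape of the sequence:

// Illustrative sketch, not part of the patch: position-independent long branch
// on pre-r6 MIPS32. Offsets are relative to the first emitted instruction.
void EmitPicLongBranchSketch(TurboAssembler* tasm, Label* target) {
  Label find_pc;
  // Offset is taken relative to (start + kLongBranchPCOffset), i.e. start + 12.
  int32_t imm32 = tasm->branch_long_offset(target);
  tasm->or_(t8, ra, zero_reg);                    // +0  save ra in t8
  tasm->bal(&find_pc);                            // +4  ra <- address of bal + 8 = +12
  tasm->lui(t9, (imm32 & kHiMask) >> kLuiShift);  // +8  delay slot: high half of offset
  tasm->bind(&find_pc);
  tasm->ori(t9, t9, imm32 & kImm16Mask);          // +12 low half of offset
  tasm->addu(t9, ra, t9);                         // +16 t9 <- (+12) + offset = target
  tasm->jr(t9);                                   // +20 jump
  tasm->or_(ra, t8, zero_reg);                    // +24 delay slot: restore ra
}

Depending on bdslot, the real BranchLong restores ra from t8 either before the jr (USE_DELAY_SLOT) or in its delay slot (PROTECT); in both cases no internal-reference relocation is needed, so the sequence stays valid when the code object moves.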

Change-Id: I374134ff540b515f3cf385a8b936487b47c55762
Reviewed-on: https://chromium-review.googlesource.com/1152810
Reviewed-by: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Sreten Kovacevic <skovacevic@wavecomp.com>
Commit-Queue: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Cr-Commit-Position: refs/heads/master@{#54901}
parent 3656b465
@@ -554,6 +554,12 @@ bool Assembler::IsBc(Instr instr) {
   return opcode == BC || opcode == BALC;
 }
 
+bool Assembler::IsBal(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  return opcode == REGIMM && rt_field == BGEZAL && rs_field == 0;
+}
+
 bool Assembler::IsBzc(Instr instr) {
   uint32_t opcode = GetOpcodeField(instr);
@@ -850,9 +856,37 @@ int Assembler::target_at(int pos, bool is_internal) {
     }
   }
   // Check we have a branch or jump instruction.
-  DCHECK(IsBranch(instr) || IsLui(instr));
+  DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
   if (IsBranch(instr)) {
     return AddBranchOffset(pos, instr);
+  } else if (IsMov(instr, t8, ra)) {
+    int32_t imm32;
+    Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
+    DCHECK(IsLui(instr_lui));
+    DCHECK(IsOri(instr_ori));
+    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+    if (imm32 == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    }
+    return pos + Assembler::kLongBranchPCOffset + imm32;
+  } else {
+    DCHECK(IsLui(instr));
+    if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
+      int32_t imm32;
+      Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+      Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
+      DCHECK(IsLui(instr_lui));
+      DCHECK(IsOri(instr_ori));
+      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+      if (imm32 == kEndOfJumpChain) {
+        // EndOfChain sentinel is returned directly, not relative to pc or pos.
+        return kEndOfChain;
+      }
+      return pos + Assembler::kLongBranchPCOffset + imm32;
     } else {
       Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
       Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -875,6 +909,7 @@ int Assembler::target_at(int pos, bool is_internal) {
       return pos - delta;
     }
   }
+  }
   return 0;
 }
@@ -916,8 +951,8 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
     instr = SetBranchOffset(pos, target_pos, instr);
     instr_at_put(pos, instr);
   } else if (IsMov(instr, t8, ra)) {
-    Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
-    Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+    Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
     DCHECK(IsLui(instr_lui));
     DCHECK(IsOri(instr_ori));
@@ -938,11 +973,28 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
       instr_lui &= ~kImm16Mask;
       instr_ori &= ~kImm16Mask;
-      instr_at_put(pos + 4 * Assembler::kInstrSize,
-                   instr_lui | ((imm >> 16) & kImm16Mask));
-      instr_at_put(pos + 5 * Assembler::kInstrSize,
+      instr_at_put(pos + 2 * Assembler::kInstrSize,
+                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+      instr_at_put(pos + 3 * Assembler::kInstrSize,
                    instr_ori | (imm & kImm16Mask));
     }
+  } else {
+    DCHECK(IsLui(instr));
+    if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
+      Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+      Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
+      DCHECK(IsLui(instr_lui));
+      DCHECK(IsOri(instr_ori));
+      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+      DCHECK_EQ(imm & 3, 0);
+      instr_lui &= ~kImm16Mask;
+      instr_ori &= ~kImm16Mask;
+      instr_at_put(pos + 0 * Assembler::kInstrSize,
+                   instr_lui | ((imm >> 16) & kImm16Mask));
+      instr_at_put(pos + 2 * Assembler::kInstrSize,
+                   instr_ori | (imm & kImm16Mask));
     } else {
       Instr instr1 = instr_at(pos + 0 * Assembler::kInstrSize);
       Instr instr2 = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -965,6 +1017,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
                    instr2 | (imm & kImm16Mask));
     }
   }
+  }
 }
 
 void Assembler::print(const Label* L) {
@@ -1421,6 +1474,28 @@ uint32_t Assembler::jump_address(Label* L) {
   return imm;
 }
 
+uint32_t Assembler::branch_long_offset(Label* L) {
+  int32_t target_pos;
+
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      return kEndOfJumpChain;
+    }
+  }
+
+  DCHECK(is_int32(static_cast<int64_t>(target_pos) -
+                  static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
+  int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
+  DCHECK_EQ(offset & 3, 0);
+
+  return offset;
+}
 
 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
   int32_t target_pos;
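kLongBranchPCOffset (now 3 * kInstrSize, see the header change below) is exactly where the bal in the emitted sequence leaves ra, which is what lets target_at() above recover the target as pos + kLongBranchPCOffset + imm32. A small worked example with invented positions (illustrative only, not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kInstrSize = 4;
  const int32_t kLongBranchPCOffset = 3 * kInstrSize;  // where bal leaves ra
  int32_t pos = 0x100;        // pc_offset() when BranchLong starts emitting
  int32_t target_pos = 0x40;  // L->pos(), a backward target in this example
  int32_t offset = target_pos - (pos + kLongBranchPCOffset);  // -204
  assert((offset & 3) == 0);  // mirrors DCHECK_EQ(offset & 3, 0)
  // target_at() later reverses the computation:
  assert(pos + kLongBranchPCOffset + offset == target_pos);
  return 0;
}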
@@ -2228,7 +2303,7 @@ void Assembler::sc(Register rd, const MemOperand& rs) {
 }
 
 void Assembler::lui(Register rd, int32_t j) {
-  DCHECK(is_uint16(j));
+  DCHECK(is_uint16(j) || is_int16(j));
   GenInstrImmediate(LUI, zero_reg, rd, j);
 }
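The relaxed DCHECK is needed because the high half of a pc-relative long-branch offset is now fed to lui() directly: for a backward branch the offset is negative, and after the arithmetic right shift in BranchLong() its high half arrives as a sign-extended negative 16-bit value rather than a uint16. A standalone illustration (the local constants mirror V8's kHiMask/kLuiShift; not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kHiMask = ~0xFFFF;  // 0xFFFF0000, mirrors V8's kHiMask
  const int kLuiShift = 16;         // mirrors V8's kLuiShift

  int32_t imm32 = -8;  // a small backward long-branch offset
  // Relies on arithmetic right shift of signed values, as the V8 code does.
  int32_t hi = (imm32 & kHiMask) >> kLuiShift;  // -1
  int32_t lo = imm32 & 0xFFFF;                  // 0xFFF8

  assert(hi == -1);  // is_int16(hi) holds, is_uint16(hi) does not
  // Reassembling the offset the way the emitted lui/ori pair does at run time:
  int32_t rebuilt = static_cast<int32_t>(
      (static_cast<uint32_t>(hi) << 16) | static_cast<uint32_t>(lo));
  assert(rebuilt == imm32);
  return 0;
}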
@@ -3832,10 +3907,6 @@ void Assembler::CheckTrampolinePool() {
   int pool_start = pc_offset();
   for (int i = 0; i < unbound_labels_count_; i++) {
     {
-      // printf("Generate trampoline %d\n", i);
-      // Buffer growth (and relocation) must be blocked for internal
-      // references until associated instructions are emitted and
-      // available to be patched.
       if (IsMipsArchVariant(kMips32r6)) {
         bc(&after_pool);
         nop();
@@ -3843,20 +3914,15 @@ void Assembler::CheckTrampolinePool() {
         Label find_pc;
         or_(t8, ra, zero_reg);
         bal(&find_pc);
-        or_(t9, ra, zero_reg);
+        lui(t9, 0);
         bind(&find_pc);
-        or_(ra, t8, zero_reg);
-        lui(t8, 0);
-        ori(t8, t8, 0);
-        addu(t9, t9, t8);
-        // Instruction jr will take or_ from the next trampoline.
-        // in its branch delay slot. This is the expected behavior
-        // in order to decrease size of trampoline pool.
+        ori(t9, t9, 0);
+        addu(t9, ra, t9);
         jr(t9);
+        or_(ra, t8, zero_reg);  // Branch delay slot.
       }
     }
   }
-  nop();
   bind(&after_pool);
   trampoline_ = Trampoline(pool_start, unbound_labels_count_);
......
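With this layout each pre-r6 trampoline slot is seven instructions (hence kTrampolineSlotsSize drops from 8 to 7 * kInstrSize in the header below) and carries its own delay-slot instruction, so the trailing nop after the pool is no longer needed. The target is patched in later through the IsMov(instr, t8, ra) case of target_at_put(): the leading or_(t8, ra, zero_reg) is the slot marker, and the lui/ori placeholders sit at pos + 2 * kInstrSize and pos + 3 * kInstrSize, which is why the patch offsets change from 4/5 to 2/3 above. A sketch of one slot with byte offsets (hypothetical helper, not part of the patch):

// Illustrative sketch of one pre-r6 MIPS32 trampoline slot starting at `pos`.
// The lui/ori immediates are emitted as zero and patched by target_at_put().
void EmitTrampolineSlotSketch(Assembler* assm) {
  Label find_pc;
  assm->or_(t8, ra, zero_reg);  // pos + 0:  slot marker, saves ra
  assm->bal(&find_pc);          // pos + 4:  ra <- pos + 12
  assm->lui(t9, 0);             // pos + 8:  patched with hi16 of (target - (pos + 12))
  assm->bind(&find_pc);
  assm->ori(t9, t9, 0);         // pos + 12: patched with lo16 of the same offset
  assm->addu(t9, ra, t9);       // pos + 16: t9 <- target
  assm->jr(t9);                 // pos + 20: jump to the bound label
  assm->or_(ra, t8, zero_reg);  // pos + 24: delay slot, restore ra
}

The MIPS64 pool further down uses the same shape with daddu in place of addu.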
@@ -556,6 +556,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     return branch_offset26(L) >> 2;
   }
   uint32_t jump_address(Label* L);
+  uint32_t branch_long_offset(Label* L);
   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
@@ -609,12 +610,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static constexpr int kInstrSize = sizeof(Instr);
   // Difference between address of current opcode and target address offset.
-  static constexpr int kBranchPCOffset = 4;
+  static constexpr int kBranchPCOffset = kInstrSize;
   // Difference between address of current opcode and target address offset,
   // when we are generating a sequence of instructions for long relative PC
   // branches
-  static constexpr int kLongBranchPCOffset = 12;
+  static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
   // Here we are patching the address in the LUI/ORI instruction pair.
   // These values are used in the serialization process and must be zero for
@@ -649,7 +650,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static constexpr int kTrampolineSlotsSize =
-      IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 8 * kInstrSize;
+      IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 7 * kInstrSize;
   RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1758,6 +1759,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static bool IsBranch(Instr instr);
   static bool IsMsaBranch(Instr instr);
   static bool IsBc(Instr instr);
+  static bool IsBal(Instr instr);
   static bool IsBzc(Instr instr);
   static bool IsBeq(Instr instr);
   static bool IsBne(Instr instr);
......
@@ -3914,41 +3914,23 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
       (!L->is_bound() || is_near_r6(L))) {
     BranchShortHelperR6(0, L);
   } else {
+    // Generate position independent long branch.
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    uint32_t imm32;
-    imm32 = jump_address(L);
-    if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
-      uint32_t lui_offset, jic_offset;
-      UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
-      {
-        BlockGrowBufferScope block_buf_growth(this);
-        // Buffer growth (and relocation) must be blocked for internal
-        // references until associated instructions are emitted and
-        // available to be patched.
-        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        UseScratchRegisterScope temps(this);
-        Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
-        lui(scratch, lui_offset);
-        jic(scratch, jic_offset);
-      }
-      CheckBuffer();
-    } else {
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
-      {
-        BlockGrowBufferScope block_buf_growth(this);
-        // Buffer growth (and relocation) must be blocked for internal
-        // references until associated instructions are emitted and
-        // available to be patched.
-        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        lui(scratch, (imm32 & kHiMask) >> kLuiShift);
-        ori(scratch, scratch, (imm32 & kImm16Mask));
-      }
-      CheckBuffer();
-      jr(scratch);
-      // Emit a nop in the branch delay slot if required.
-      if (bdslot == PROTECT) nop();
+    Label find_pc;
+    int32_t imm32;
+    imm32 = branch_long_offset(L);
+    or_(t8, ra, zero_reg);
+    bal(&find_pc);
+    lui(t9, (imm32 & kHiMask) >> kLuiShift);
+    bind(&find_pc);
+    ori(t9, t9, (imm32 & kImm16Mask));
+    addu(t9, ra, t9);
+    if (bdslot == USE_DELAY_SLOT) {
+      or_(ra, t8, zero_reg);
     }
+    jr(t9);
+    // Emit a or_ in the branch delay slot if it's protected.
+    if (bdslot == PROTECT) or_(ra, t8, zero_reg);
   }
 }
@@ -3957,42 +3939,20 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
       (!L->is_bound() || is_near_r6(L))) {
     BranchAndLinkShortHelperR6(0, L);
   } else {
+    // Generate position independent long branch and link.
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    uint32_t imm32;
-    imm32 = jump_address(L);
-    if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
-      uint32_t lui_offset, jialc_offset;
-      UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
-      {
-        BlockGrowBufferScope block_buf_growth(this);
-        // Buffer growth (and relocation) must be blocked for internal
-        // references until associated instructions are emitted and
-        // available to be patched.
-        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        UseScratchRegisterScope temps(this);
-        Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
-        lui(scratch, lui_offset);
-        jialc(scratch, jialc_offset);
-      }
-      CheckBuffer();
-    } else {
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
-      {
-        BlockGrowBufferScope block_buf_growth(this);
-        // Buffer growth (and relocation) must be blocked for internal
-        // references until associated instructions are emitted and
-        // available to be patched.
-        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        lui(scratch, (imm32 & kHiMask) >> kLuiShift);
-        ori(scratch, scratch, (imm32 & kImm16Mask));
-      }
-      CheckBuffer();
-      jalr(scratch);
+    Label find_pc;
+    int32_t imm32;
+    imm32 = branch_long_offset(L);
+    lui(t8, (imm32 & kHiMask) >> kLuiShift);
+    bal(&find_pc);
+    ori(t8, t8, (imm32 & kImm16Mask));
+    bind(&find_pc);
+    addu(t8, ra, t8);
+    jalr(t8);
     // Emit a nop in the branch delay slot if required.
     if (bdslot == PROTECT) nop();
   }
-  }
 }
 
 void TurboAssembler::DropAndRet(int drop) {
......
@@ -533,6 +533,12 @@ bool Assembler::IsBc(Instr instr) {
   return opcode == BC || opcode == BALC;
 }
 
+bool Assembler::IsBal(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  return opcode == REGIMM && rt_field == BGEZAL && rs_field == 0;
+}
+
 bool Assembler::IsBzc(Instr instr) {
   uint32_t opcode = GetOpcodeField(instr);
@@ -781,12 +787,40 @@ int Assembler::target_at(int pos, bool is_internal) {
     }
   }
   // Check we have a branch or jump instruction.
-  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
+  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
+         IsMov(instr, t8, ra));
   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
   // the compiler uses arithmetic shifts for signed integers.
   if (IsBranch(instr)) {
     return AddBranchOffset(pos, instr);
+  } else if (IsMov(instr, t8, ra)) {
+    int32_t imm32;
+    Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
+    DCHECK(IsLui(instr_lui));
+    DCHECK(IsOri(instr_ori));
+    imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+    imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+    if (imm32 == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    }
+    return pos + Assembler::kLongBranchPCOffset + imm32;
   } else if (IsLui(instr)) {
+    if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
+      int32_t imm32;
+      Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+      Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
+      DCHECK(IsLui(instr_lui));
+      DCHECK(IsOri(instr_ori));
+      imm32 = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+      imm32 |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+      if (imm32 == kEndOfJumpChain) {
+        // EndOfChain sentinel is returned directly, not relative to pc or pos.
+        return kEndOfChain;
+      }
+      return pos + Assembler::kLongBranchPCOffset + imm32;
+    } else {
       Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
       Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
       Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
@@ -810,6 +844,7 @@ int Assembler::target_at(int pos, bool is_internal) {
       DCHECK(pos > delta);
       return pos - delta;
     }
+    }
   } else {
     DCHECK(IsJ(instr) || IsJal(instr));
     int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
@@ -859,6 +894,22 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
     instr = SetBranchOffset(pos, target_pos, instr);
     instr_at_put(pos, instr);
   } else if (IsLui(instr)) {
+    if (IsBal(instr_at(pos + Assembler::kInstrSize))) {
+      Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+      Instr instr_ori = instr_at(pos + 2 * Assembler::kInstrSize);
+      DCHECK(IsLui(instr_lui));
+      DCHECK(IsOri(instr_ori));
+      int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset);
+      DCHECK_EQ(imm & 3, 0);
+      instr_lui &= ~kImm16Mask;
+      instr_ori &= ~kImm16Mask;
+      instr_at_put(pos + 0 * Assembler::kInstrSize,
+                   instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+      instr_at_put(pos + 2 * Assembler::kInstrSize,
+                   instr_ori | (imm & kImm16Mask));
+    } else {
       Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
       Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
       Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
@@ -878,9 +929,10 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
                    instr_ori | ((imm >> 16) & kImm16Mask));
       instr_at_put(pos + 3 * Assembler::kInstrSize,
                    instr_ori2 | (imm & kImm16Mask));
+    }
   } else if (IsMov(instr, t8, ra)) {
-    Instr instr_lui = instr_at(pos + 4 * Assembler::kInstrSize);
-    Instr instr_ori = instr_at(pos + 5 * Assembler::kInstrSize);
+    Instr instr_lui = instr_at(pos + 2 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 3 * Assembler::kInstrSize);
     DCHECK(IsLui(instr_lui));
     DCHECK(IsOri(instr_ori));
@@ -901,9 +953,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
     instr_lui &= ~kImm16Mask;
     instr_ori &= ~kImm16Mask;
-    instr_at_put(pos + 4 * Assembler::kInstrSize,
-                 instr_lui | ((imm >> 16) & kImm16Mask));
-    instr_at_put(pos + 5 * Assembler::kInstrSize,
+    instr_at_put(pos + 2 * Assembler::kInstrSize,
+                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+    instr_at_put(pos + 3 * Assembler::kInstrSize,
                  instr_ori | (imm & kImm16Mask));
   }
   } else if (IsJ(instr) || IsJal(instr)) {
@@ -989,7 +1041,7 @@ void Assembler::bind_to(Label* L, int pos) {
       target_at_put(fixup_pos, pos, false);
     } else {
       DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
-             IsEmittedConstant(instr));
+             IsEmittedConstant(instr) || IsMov(instr, t8, ra));
       target_at_put(fixup_pos, pos, false);
     }
   }
@@ -1405,6 +1457,25 @@ uint64_t Assembler::jump_offset(Label* L) {
   return static_cast<uint64_t>(imm);
 }
 
+uint64_t Assembler::branch_long_offset(Label* L) {
+  int64_t target_pos;
+
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      return kEndOfJumpChain;
+    }
+  }
+  int64_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
+  DCHECK_EQ(offset & 3, 0);
+
+  return static_cast<uint64_t>(offset);
+}
 
 int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
   int32_t target_pos;
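Although the 64-bit variant returns a uint64_t, the offset must still be rebuilt from a single lui/ori pair at run time, so the usable range remains a signed 32-bit pc-relative displacement; the BranchLong/BranchAndLinkLong callers further down DCHECK(is_int32(imm64)) for exactly this reason. A minimal illustration with made-up positions (not from the commit):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t kLongBranchPCOffset = 12;  // 3 * kInstrSize
  int64_t pc = 0x1000;                     // hypothetical pc_offset()
  int64_t target_pos = 0x7FFF0000;         // hypothetical label position
  int64_t offset = target_pos - (pc + kLongBranchPCOffset);
  // The emitted lui/ori pair can only encode a signed 32-bit displacement:
  assert(offset >= INT32_MIN && offset <= INT32_MAX);
  assert((offset & 3) == 0);               // mirrors DCHECK_EQ(offset & 3, 0)
  return 0;
}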
@@ -2404,7 +2475,7 @@ void Assembler::scd(Register rd, const MemOperand& rs) {
 }
 
 void Assembler::lui(Register rd, int32_t j) {
-  DCHECK(is_uint16(j));
+  DCHECK(is_uint16(j) || is_int16(j));
   GenInstrImmediate(LUI, zero_reg, rd, j);
 }
@@ -4184,9 +4255,7 @@ void Assembler::CheckTrampolinePool() {
   int pool_start = pc_offset();
   for (int i = 0; i < unbound_labels_count_; i++) {
-    { // Buffer growth (and relocation) must be blocked for internal
-      // references until associated instructions are emitted and available
-      // to be patched.
+    {
       if (kArchVariant == kMips64r6) {
         bc(&after_pool);
         nop();
@@ -4194,20 +4263,15 @@ void Assembler::CheckTrampolinePool() {
         Label find_pc;
         or_(t8, ra, zero_reg);
         bal(&find_pc);
-        or_(t9, ra, zero_reg);
+        lui(t9, 0);
         bind(&find_pc);
-        or_(ra, t8, zero_reg);
-        lui(t8, 0);
-        ori(t8, t8, 0);
-        daddu(t9, t9, t8);
-        // Instruction jr will take or_ from the next trampoline.
-        // in its branch delay slot. This is the expected behavior
-        // in order to decrease size of trampoline pool.
+        ori(t9, t9, 0);
+        daddu(t9, ra, t9);
        jr(t9);
+        or_(ra, t8, zero_reg);  // Branch delay slot.
       }
     }
   }
-  nop();
   bind(&after_pool);
   trampoline_ = Trampoline(pool_start, unbound_labels_count_);
......
@@ -564,6 +564,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   }
   uint64_t jump_address(Label* L);
   uint64_t jump_offset(Label* L);
+  uint64_t branch_long_offset(Label* L);
   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
@@ -619,12 +620,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static constexpr int kInstrSize = sizeof(Instr);
   // Difference between address of current opcode and target address offset.
-  static constexpr int kBranchPCOffset = 4;
+  static constexpr int kBranchPCOffset = kInstrSize;
   // Difference between address of current opcode and target address offset,
   // when we are generating a sequence of instructions for long relative PC
   // branches
-  static constexpr int kLongBranchPCOffset = 12;
+  static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
   // Here we are patching the address in the LUI/ORI instruction pair.
   // These values are used in the serialization process and must be zero for
@@ -660,7 +661,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static constexpr int kTrampolineSlotsSize =
-      kArchVariant == kMips64r6 ? 2 * kInstrSize : 8 * kInstrSize;
+      kArchVariant == kMips64r6 ? 2 * kInstrSize : 7 * kInstrSize;
   RegList* GetScratchRegisterList() { return &scratch_register_list_; }
@@ -1836,6 +1837,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static bool IsBranch(Instr instr);
   static bool IsMsaBranch(Instr instr);
   static bool IsBc(Instr instr);
+  static bool IsBal(Instr instr);
   static bool IsBzc(Instr instr);
   static bool IsBeq(Instr instr);
......
@@ -4291,17 +4291,24 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
       (!L->is_bound() || is_near_r6(L))) {
     BranchShortHelperR6(0, L);
   } else {
-    EmitForbiddenSlotInstruction();
+    // Generate position independent long branch.
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    {
-      BlockGrowBufferScope block_buf_growth(this);
-      // Buffer growth (and relocation) must be blocked for internal references
-      // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-      j(L);
+    Label find_pc;
+    int64_t imm64;
+    imm64 = branch_long_offset(L);
+    DCHECK(is_int32(imm64));
+    or_(t8, ra, zero_reg);
+    bal(&find_pc);
+    lui(t9, (imm64 & kHiMaskOf32) >> kLuiShift);
+    bind(&find_pc);
+    ori(t9, t9, (imm64 & kImm16Mask));
+    daddu(t9, ra, t9);
+    if (bdslot == USE_DELAY_SLOT) {
+      or_(ra, t8, zero_reg);
     }
-    // Emit a nop in the branch delay slot if required.
-    if (bdslot == PROTECT) nop();
+    jr(t9);
+    // Emit a or_ in the branch delay slot if it's protected.
+    if (bdslot == PROTECT) or_(ra, t8, zero_reg);
   }
 }
@@ -4310,15 +4317,17 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
       (!L->is_bound() || is_near_r6(L))) {
     BranchAndLinkShortHelperR6(0, L);
   } else {
-    EmitForbiddenSlotInstruction();
     BlockTrampolinePoolScope block_trampoline_pool(this);
-    {
-      BlockGrowBufferScope block_buf_growth(this);
-      // Buffer growth (and relocation) must be blocked for internal references
-      // until associated instructions are emitted and available to be patched.
-      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-      jal(L);
-    }
+    Label find_pc;
+    int64_t imm64;
+    imm64 = branch_long_offset(L);
+    DCHECK(is_int32(imm64));
+    lui(t8, (imm64 & kHiMaskOf32) >> kLuiShift);
+    bal(&find_pc);
+    ori(t8, t8, (imm64 & kImm16Mask));
+    bind(&find_pc);
+    daddu(t8, ra, t8);
+    jalr(t8);
     // Emit a nop in the branch delay slot if required.
     if (bdslot == PROTECT) nop();
   }
......