Commit 8ae28f32 authored by sreten.kovacevic, committed by Commit Bot

[Refactor][mips] Refactor liftoff MIPS files

Since the `at` register is used heavily in macro-assembler-mips, replace
uses of this register with `kScratchReg` and `kScratchReg2`.
Also remove the TODO comments about unaligned memory access, since
there is nothing that can be done about it.

Change-Id: Ibf55c04a1f53521f34dfb483294de3010a0120c6
Reviewed-on: https://chromium-review.googlesource.com/1059347
Reviewed-by: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Cr-Commit-Position: refs/heads/master@{#53181}
parent b9d80c01
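For context on the rationale above: MIPS macro-assembler pseudo-instructions can expand into several machine instructions and borrow `at` as their own temporary, so a value that surrounding code parks in `at` may be silently clobbered before it is used, while a register reserved as a dedicated scratch register is safe from such expansions. The standalone sketch below models that failure mode with a toy assembler; it is only an illustration of the idea, not V8 code, and every name and value in it (MockAssembler, LiWide, Push, the register indices) is hypothetical.

```cpp
// Toy model (NOT V8's API) of why parking values in `at` is fragile:
// macro "instructions" expand to multiple steps and reuse `at` internally.
#include <cstdint>
#include <iostream>

struct MockAssembler {
  static constexpr int at = 1;            // assembler temporary, fair game for macro expansions (hypothetical index)
  static constexpr int kScratchReg = 19;  // register reserved for caller-side scratch use (hypothetical index)
  int64_t regs[32] = {};
  int64_t stack[64] = {};
  int sp = 0;

  // A "macro" instruction: materializing a wide immediate takes several steps
  // and, like macro-assembler-mips pseudo-instructions, borrows `at` internally.
  void LiWide(int dst, int64_t imm) {
    regs[at] = imm >> 16;        // lui at, hi(imm)   <-- clobbers at
    regs[dst] = regs[at] << 16;  // sll dst, at, 16
    regs[dst] |= imm & 0xFFFF;   // ori dst, dst, lo(imm)
  }

  void Push(int src) { stack[sp++] = regs[src]; }
};

int main() {
  MockAssembler masm;

  // Risky pattern: park a value in `at`, then emit a macro instruction before pushing it.
  masm.regs[MockAssembler::at] = 0x1234;
  masm.LiWide(5, 0xDEADBEEF);    // expansion silently overwrites at
  masm.Push(MockAssembler::at);  // pushes 0xDEAD, not 0x1234

  // Safe pattern: route the value through the dedicated scratch register.
  masm.regs[MockAssembler::kScratchReg] = 0x1234;
  masm.LiWide(5, 0xDEADBEEF);             // does not touch kScratchReg
  masm.Push(MockAssembler::kScratchReg);  // pushes the expected 0x1234

  std::cout << std::hex << masm.stack[0] << " vs " << masm.stack[1] << "\n";
}
```

In this toy run the first pushed value is the macro's leftover temporary rather than 0x1234, while the second, routed through the dedicated scratch register, survives; that is the class of bug the switch to `kScratchReg`/`kScratchReg2` avoids.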
@@ -173,7 +173,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
                             uint32_t* protected_load_pc, bool is_load_mem) {
-  // TODO(ksreten): Add check if unaligned memory access
   Register src = no_reg;
   if (offset_reg != no_reg) {
     src = GetUnusedRegister(kGpReg, pinned).gp();
@@ -255,7 +254,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc, bool is_store_mem) {
-  // TODO(ksreten): Add check if unaligned memory access
   Register dst = no_reg;
   if (offset_reg != no_reg) {
     dst = GetUnusedRegister(kGpReg, pinned).gp();
@@ -323,9 +321,9 @@ void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
     case LoadType::kI64Load8U:
     case LoadType::kI64Load8S:
       // Swap low and high registers.
-      TurboAssembler::Move(at, tmp.low_gp());
+      TurboAssembler::Move(kScratchReg, tmp.low_gp());
       TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
-      TurboAssembler::Move(tmp.high_gp(), at);
+      TurboAssembler::Move(tmp.high_gp(), kScratchReg);
       V8_FALLTHROUGH;
     case LoadType::kI32Load8U:
     case LoadType::kI32Load8S:
@@ -351,9 +349,9 @@ void LiftoffAssembler::ChangeEndiannessLoad(LiftoffRegister dst, LoadType type,
       emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
       V8_FALLTHROUGH;
     case LoadType::kI64Load:
-      TurboAssembler::Move(at, tmp.low_gp());
+      TurboAssembler::Move(kScratchReg, tmp.low_gp());
       TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      TurboAssembler::ByteSwapSigned(tmp.high_gp(), at, 4);
+      TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
       break;
     case LoadType::kI64Load16U:
       TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2);
@@ -397,9 +395,9 @@ void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
   switch (type.value()) {
     case StoreType::kI64Store8:
       // Swap low and high registers.
-      TurboAssembler::Move(at, tmp.low_gp());
+      TurboAssembler::Move(kScratchReg, tmp.low_gp());
       TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
-      TurboAssembler::Move(tmp.high_gp(), at);
+      TurboAssembler::Move(tmp.high_gp(), kScratchReg);
       V8_FALLTHROUGH;
     case StoreType::kI32Store8:
       // No need to change endianness for byte size.
@@ -421,9 +419,9 @@ void LiftoffAssembler::ChangeEndiannessStore(LiftoffRegister src,
     case StoreType::kI64Store:
     case StoreType::kI64Store32:
     case StoreType::kI64Store16:
-      TurboAssembler::Move(at, tmp.low_gp());
+      TurboAssembler::Move(kScratchReg, tmp.low_gp());
       TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
-      TurboAssembler::ByteSwapSigned(tmp.high_gp(), at, 4);
+      TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
       break;
     default:
       UNREACHABLE();
@@ -1337,8 +1335,8 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_descriptor,
                                     Register target) {
   if (target == no_reg) {
-    pop(at);
-    Call(at);
+    pop(kScratchReg);
+    Call(kScratchReg);
   } else {
     Call(target);
   }
@@ -1360,13 +1358,14 @@ void LiftoffStackSlots::Construct() {
       case LiftoffAssembler::VarState::kStack: {
         if (src.type() == kWasmF64) {
           DCHECK_EQ(kLowWord, slot.half_);
-          asm_->lw(at, liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
-          asm_->push(at);
+          asm_->lw(kScratchReg,
+                   liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+          asm_->push(kScratchReg);
         }
-        asm_->lw(at,
-                 liftoff::GetHalfStackSlot(2 * slot.src_index_ +
-                                           (slot.half_ == kLowWord ? 0 : 1)));
-        asm_->push(at);
+        asm_->lw(kScratchReg,
+                 liftoff::GetHalfStackSlot(2 * slot.src_index_ +
+                                           (slot.half_ == kLowWord ? 0 : 1)));
+        asm_->push(kScratchReg);
         break;
       }
       case LiftoffAssembler::VarState::kRegister:
@@ -1380,9 +1379,10 @@ void LiftoffStackSlots::Construct() {
         break;
       case LiftoffAssembler::VarState::KIntConst: {
         // The high word is the sign extension of the low word.
-        asm_->li(at, Operand(slot.half_ == kLowWord ? src.i32_const()
-                                                    : src.i32_const() >> 31));
-        asm_->push(at);
+        asm_->li(kScratchReg,
+                 Operand(slot.half_ == kLowWord ? src.i32_const()
+                                                : src.i32_const() >> 31));
+        asm_->push(kScratchReg);
         break;
       }
     }
...
@@ -162,7 +162,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
                             uint32_t* protected_load_pc, bool is_load_mem) {
-  // TODO(ksreten): Add check if unaligned memory access
   MemOperand src_op(src_addr, offset_imm);
   if (offset_reg != no_reg) {
     Register src = GetUnusedRegister(kGpReg, pinned).gp();
@@ -219,7 +218,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc, bool is_store_mem) {
-  // TODO(ksreten): Add check if unaligned memory access
   Register dst = no_reg;
   if (offset_reg != no_reg) {
     dst = GetUnusedRegister(kGpReg, pinned).gp();
@@ -1203,8 +1201,8 @@ void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_descriptor,
                                     Register target) {
   if (target == no_reg) {
-    pop(at);
-    Call(at);
+    pop(kScratchReg);
+    Call(kScratchReg);
   } else {
     Call(target);
   }
@@ -1224,15 +1222,15 @@ void LiftoffStackSlots::Construct() {
     const LiftoffAssembler::VarState& src = slot.src_;
     switch (src.loc()) {
       case LiftoffAssembler::VarState::kStack:
-        asm_->ld(at, liftoff::GetStackSlot(slot.src_index_));
-        asm_->push(at);
+        asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_));
+        asm_->push(kScratchReg);
         break;
       case LiftoffAssembler::VarState::kRegister:
         liftoff::push(asm_, src.reg(), src.type());
         break;
       case LiftoffAssembler::VarState::KIntConst: {
-        asm_->li(at, Operand(src.i32_const()));
-        asm_->push(at);
+        asm_->li(kScratchReg, Operand(src.i32_const()));
+        asm_->push(kScratchReg);
         break;
       }
     }
...