Commit c98b50fb authored by Sreten Kovacevic, committed by Commit Bot

[mips][Liftoff]: Enable BE tests on Liftoff

* Enable Liftoff wasm cctests on BE for mips and mips64
* Fix issues that were introduced with these tests and that are
linked with Load/Store instructions
* Change endianness on GetGlobal and SetGlobal, as done in TF
* Skip I32Binop tests that fail with an OOM error and do not seem to be
directly related to this task

Bug: v8:6600
Change-Id: Ib62ca5e3c681326d28e70a5157d8646e0c8d0b51
Reviewed-on: https://chromium-review.googlesource.com/1213183
Commit-Queue: Sreten Kovacevic <skovacevic@wavecomp.com>
Reviewed-by: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55751}
parent e569438b
...@@ -1174,7 +1174,7 @@ class LiftoffCompiler { ...@@ -1174,7 +1174,7 @@ class LiftoffCompiler {
LiftoffRegister value = LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned)); pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LoadType type = LoadType::ForValueType(global->type); LoadType type = LoadType::ForValueType(global->type);
__ Load(value, addr.gp(), no_reg, offset, type, pinned); __ Load(value, addr.gp(), no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(global->type, value); __ PushRegister(global->type, value);
} }
...@@ -1188,7 +1188,7 @@ class LiftoffCompiler { ...@@ -1188,7 +1188,7 @@ class LiftoffCompiler {
LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset); LiftoffRegister addr = GetGlobalBaseAndOffset(global, pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned)); LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueType(global->type); StoreType type = StoreType::ForValueType(global->type);
__ Store(addr.gp(), no_reg, offset, reg, type, pinned); __ Store(addr.gp(), no_reg, offset, reg, type, pinned, nullptr, true);
} }
void Unreachable(FullDecoder* decoder) { void Unreachable(FullDecoder* decoder) {
......
...@@ -15,6 +15,14 @@ namespace wasm { ...@@ -15,6 +15,14 @@ namespace wasm {
namespace liftoff { namespace liftoff {
#if defined(V8_TARGET_BIG_ENDIAN)
constexpr int32_t kLowWordOffset = 4;
constexpr int32_t kHighWordOffset = 0;
#else
constexpr int32_t kLowWordOffset = 0;
constexpr int32_t kHighWordOffset = 4;
#endif
// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack // fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
// slot is located at fp-16. // slot is located at fp-16.
constexpr int32_t kConstantStackSpace = 8; constexpr int32_t kConstantStackSpace = 8;
...@@ -41,8 +49,10 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base, ...@@ -41,8 +49,10 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
assm->lw(dst.gp(), src); assm->lw(dst.gp(), src);
break; break;
case kWasmI64: case kWasmI64:
assm->lw(dst.low_gp(), src); assm->lw(dst.low_gp(),
assm->lw(dst.high_gp(), MemOperand(base, offset + 4)); MemOperand(base, offset + liftoff::kLowWordOffset));
assm->lw(dst.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break; break;
case kWasmF32: case kWasmF32:
assm->lwc1(dst.fp(), src); assm->lwc1(dst.fp(), src);
...@@ -63,8 +73,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, ...@@ -63,8 +73,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usw(src.gp(), dst); assm->Usw(src.gp(), dst);
break; break;
case kWasmI64: case kWasmI64:
assm->Usw(src.low_gp(), dst); assm->Usw(src.low_gp(),
assm->Usw(src.high_gp(), MemOperand(base, offset + 4)); MemOperand(base, offset + liftoff::kLowWordOffset));
assm->Usw(src.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break; break;
case kWasmF32: case kWasmF32:
assm->Uswc1(src.fp(), dst, t8); assm->Uswc1(src.fp(), dst, t8);
...@@ -106,11 +118,6 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, ...@@ -106,11 +118,6 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
switch (type.value()) { switch (type.value()) {
case LoadType::kI64Load8U: case LoadType::kI64Load8U:
case LoadType::kI64Load8S: case LoadType::kI64Load8S:
// Swap low and high registers.
assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
V8_FALLTHROUGH;
case LoadType::kI32Load8U: case LoadType::kI32Load8U:
case LoadType::kI32Load8S: case LoadType::kI32Load8S:
// No need to change endianness for byte size. // No need to change endianness for byte size.
...@@ -140,20 +147,20 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, ...@@ -140,20 +147,20 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4); assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
break; break;
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.high_gp(), 2); assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.low_gp(), 2);
assm->TurboAssembler::Move(tmp.high_gp(), zero_reg); assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
break; break;
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 2); assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
assm->sra(tmp.high_gp(), tmp.high_gp(), 31); assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
break; break;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
assm->TurboAssembler::Move(tmp.high_gp(), zero_reg); assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
break; break;
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
assm->sra(tmp.high_gp(), tmp.high_gp(), 31); assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
...@@ -179,11 +186,6 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, ...@@ -179,11 +186,6 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
LiftoffRegister tmp = src; LiftoffRegister tmp = src;
switch (type.value()) { switch (type.value()) {
case StoreType::kI64Store8: case StoreType::kI64Store8:
// Swap low and high registers.
assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
assm->TurboAssembler::Move(tmp.low_gp(), tmp.high_gp());
assm->TurboAssembler::Move(tmp.high_gp(), kScratchReg);
V8_FALLTHROUGH;
case StoreType::kI32Store8: case StoreType::kI32Store8:
// No need to change endianness for byte size. // No need to change endianness for byte size.
return; return;
...@@ -193,21 +195,27 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, ...@@ -193,21 +195,27 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI32Store: case StoreType::kI32Store:
case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break; break;
case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
case StoreType::kF64Store: case StoreType::kF64Store:
is_float = true; is_float = true;
tmp = assm->GetUnusedRegister(kGpRegPair, pinned); tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI64Store: case StoreType::kI64Store:
case StoreType::kI64Store32:
case StoreType::kI64Store16:
assm->TurboAssembler::Move(kScratchReg, tmp.low_gp()); assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4); assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
break; break;
case StoreType::kI64Store32:
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
break;
case StoreType::kI64Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
...@@ -358,11 +366,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ...@@ -358,11 +366,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
sra(dst.high_gp(), dst.high_gp(), 31); sra(dst.high_gp(), dst.high_gp(), 31);
break; break;
case LoadType::kI64Load: { case LoadType::kI64Load: {
MemOperand src_op_upper = (offset_reg != no_reg) MemOperand src_op =
? MemOperand(src, offset_imm + 4) (offset_reg != no_reg)
: MemOperand(src_addr, offset_imm + 4); ? MemOperand(src, offset_imm + liftoff::kLowWordOffset)
TurboAssembler::Ulw(dst.high_gp(), src_op_upper); : MemOperand(src_addr, offset_imm + liftoff::kLowWordOffset);
MemOperand src_op_upper =
(offset_reg != no_reg)
? MemOperand(src, offset_imm + liftoff::kHighWordOffset)
: MemOperand(src_addr, offset_imm + liftoff::kHighWordOffset);
TurboAssembler::Ulw(dst.low_gp(), src_op); TurboAssembler::Ulw(dst.low_gp(), src_op);
TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
break; break;
} }
case LoadType::kF32Load: case LoadType::kF32Load:
...@@ -377,6 +390,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ...@@ -377,6 +390,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) { if (is_load_mem) {
pinned.set(src_op.rm());
liftoff::ChangeEndiannessLoad(this, dst, type, pinned); liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
} }
#endif #endif
...@@ -396,6 +410,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ...@@ -396,6 +410,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) { if (is_store_mem) {
pinned.set(dst_op.rm());
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned); LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value. // Save original value.
Move(tmp, src, type.value_type()); Move(tmp, src, type.value_type());
...@@ -427,11 +442,16 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ...@@ -427,11 +442,16 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usw(src.gp(), dst_op); TurboAssembler::Usw(src.gp(), dst_op);
break; break;
case StoreType::kI64Store: { case StoreType::kI64Store: {
MemOperand dst_op_upper = (offset_reg != no_reg) MemOperand dst_op =
? MemOperand(dst, offset_imm + 4) (offset_reg != no_reg)
: MemOperand(dst_addr, offset_imm + 4); ? MemOperand(dst, offset_imm + liftoff::kLowWordOffset)
TurboAssembler::Usw(src.high_gp(), dst_op_upper); : MemOperand(dst_addr, offset_imm + liftoff::kLowWordOffset);
MemOperand dst_op_upper =
(offset_reg != no_reg)
? MemOperand(dst, offset_imm + liftoff::kHighWordOffset)
: MemOperand(dst_addr, offset_imm + liftoff::kHighWordOffset);
TurboAssembler::Usw(src.low_gp(), dst_op); TurboAssembler::Usw(src.low_gp(), dst_op);
TurboAssembler::Usw(src.high_gp(), dst_op_upper);
break; break;
} }
case StoreType::kF32Store: case StoreType::kF32Store:
......
...@@ -72,6 +72,9 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, ...@@ -72,6 +72,9 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
assm->daddiu(sp, sp, -kPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
case kWasmI64: case kWasmI64:
assm->push(reg.gp()); assm->push(reg.gp());
break; break;
...@@ -107,22 +110,18 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, ...@@ -107,22 +110,18 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
V8_FALLTHROUGH; V8_FALLTHROUGH;
case LoadType::kI64Load32U: case LoadType::kI64Load32U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
assm->dsrl32(tmp.gp(), tmp.gp(), 0);
break; break;
case LoadType::kI32Load: case LoadType::kI32Load:
case LoadType::kI64Load32S: case LoadType::kI64Load32S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
assm->dsra32(tmp.gp(), tmp.gp(), 0);
break; break;
case LoadType::kI32Load16S: case LoadType::kI32Load16S:
case LoadType::kI64Load16S: case LoadType::kI64Load16S:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
assm->dsra32(tmp.gp(), tmp.gp(), 0);
break; break;
case LoadType::kI32Load16U: case LoadType::kI32Load16U:
case LoadType::kI64Load16U: case LoadType::kI64Load16U:
assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
assm->dsrl32(tmp.gp(), tmp.gp(), 0);
break; break;
case LoadType::kF64Load: case LoadType::kF64Load:
is_float = true; is_float = true;
...@@ -165,18 +164,24 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, ...@@ -165,18 +164,24 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI32Store: case StoreType::kI32Store:
case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break; break;
case StoreType::kI32Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break;
case StoreType::kF64Store: case StoreType::kF64Store:
is_float = true; is_float = true;
tmp = assm->GetUnusedRegister(kGpReg, pinned); tmp = assm->GetUnusedRegister(kGpReg, pinned);
assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
V8_FALLTHROUGH; V8_FALLTHROUGH;
case StoreType::kI64Store: case StoreType::kI64Store:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
break;
case StoreType::kI64Store32: case StoreType::kI64Store32:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
break;
case StoreType::kI64Store16: case StoreType::kI64Store16:
assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
...@@ -274,12 +279,13 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ...@@ -274,12 +279,13 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm, Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) { uint32_t* protected_load_pc, bool is_load_mem) {
MemOperand src_op(src_addr, offset_imm); Register src = no_reg;
if (offset_reg != no_reg) { if (offset_reg != no_reg) {
Register src = GetUnusedRegister(kGpReg, pinned).gp(); src = GetUnusedRegister(kGpReg, pinned).gp();
emit_ptrsize_add(src, src_addr, offset_reg); emit_ptrsize_add(src, src_addr, offset_reg);
src_op = MemOperand(src, offset_imm);
} }
MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
: MemOperand(src_addr, offset_imm);
if (protected_load_pc) *protected_load_pc = pc_offset(); if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) { switch (type.value()) {
...@@ -321,6 +327,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, ...@@ -321,6 +327,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
if (is_load_mem) { if (is_load_mem) {
pinned.set(src_op.rm());
liftoff::ChangeEndiannessLoad(this, dst, type, pinned); liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
} }
#endif #endif
...@@ -340,6 +347,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, ...@@ -340,6 +347,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
if (is_store_mem) { if (is_store_mem) {
pinned.set(dst_op.rm());
LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned); LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
// Save original value. // Save original value.
Move(tmp, src, type.value_type()); Move(tmp, src, type.value_type());
......
...@@ -314,12 +314,10 @@ ...@@ -314,12 +314,10 @@
}], # 'arch == mips' }], # 'arch == mips'
############################################################################## ##############################################################################
['arch == mips or arch == mips64', { ['arch == mips64', {
# TODO(mips-team): Implement LiftOff on big-endian # TODO(mips-team): Currently fails on mips64 board.
'test-run-wasm/RunWasmLiftoff*': [SKIP], 'test-run-wasm/RunWasmLiftoff_I32Binop*': [SKIP],
'test-run-wasm-64/RunWasmLiftoff*': [SKIP], }], # 'arch == mips64'
}], # 'arch == mips or arch == mips64'
############################################################################## ##############################################################################
['arch == mips64el or arch == mips64', { ['arch == mips64el or arch == mips64', {
# BUG(v8:3154). # BUG(v8:3154).
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment