Commit d24b63ec authored by sreten.kovacevic, committed by Commit Bot

[Liftoff] Introduce Liftoff support for MIPS

Added registers and conditions for this architecture.
Implemented some of the instructions, mainly for wasm
context operations.

Bug: v8:6600
Change-Id: I5f3e32eb4d284172b21434456395256872da3b46
Reviewed-on: https://chromium-review.googlesource.com/906609
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51176}
parent 3c474998
@@ -11,6 +11,10 @@
 #include "src/ia32/assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/x64/assembler-x64.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
 #endif

 namespace v8 {
@@ -38,6 +42,28 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
 constexpr RegList kLiftoffAssemblerFpCacheRegs =
     DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();

+#elif V8_TARGET_ARCH_MIPS
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+    Register::ListOf<a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7, v0, v1>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+    DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+                           f22, f24>();
+
+#elif V8_TARGET_ARCH_MIPS64
+
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+    Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7, v0, v1>();
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+    DoubleRegister::ListOf<f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20,
+                           f22, f24, f26>();
+
 #else

 constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
@@ -59,6 +85,20 @@ constexpr Condition kUnsignedLessThan = below;
 constexpr Condition kUnsignedLessEqual = below_equal;
 constexpr Condition kUnsignedGreaterThan = above;
 constexpr Condition kUnsignedGreaterEqual = above_equal;
+
+#elif V8_TARGET_ARCH_MIPS
+
+constexpr Condition kEqual = eq;
+constexpr Condition kUnequal = ne;
+constexpr Condition kSignedLessThan = lt;
+constexpr Condition kSignedLessEqual = le;
+constexpr Condition kSignedGreaterThan = gt;
+constexpr Condition kSignedGreaterEqual = ge;
+constexpr Condition kUnsignedLessThan = ult;
+constexpr Condition kUnsignedLessEqual = ule;
+constexpr Condition kUnsignedGreaterThan = ugt;
+constexpr Condition kUnsignedGreaterEqual = uge;
+
 #else
 // On unimplemented platforms, just make this compile.
 constexpr Condition kEqual = static_cast<Condition>(0);
......
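For context, MIPS has no condition-flag register, so these Condition constants are consumed by compare-and-branch macro instructions rather than by flag tests. A minimal sketch of such a use site, assuming V8's TurboAssembler::Branch(Label*, Condition, Register, const Operand&) overload; EmitConditionalJump is a hypothetical wrapper, not code from this CL:

    // Hedged sketch: a Liftoff-style conditional jump lowered on MIPS by
    // pairing one of the aliases above with a compare-and-branch.
    void EmitConditionalJump(TurboAssembler* tasm, Label* target, Register lhs,
                             Register rhs) {
      tasm->Branch(target, kUnsignedLessThan, lhs, Operand(rhs));  // ult
    }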
@@ -11,23 +11,59 @@ namespace v8 {
 namespace internal {
 namespace wasm {

 namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, first stack slot
+// is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
 }  // namespace liftoff

 void LiftoffAssembler::ReserveStackSpace(uint32_t stack_slots) {
-  UNIMPLEMENTED();
+  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+  DCHECK_LE(bytes, kMaxInt);
+  addiu(sp, sp, -bytes);
 }
 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                     RelocInfo::Mode rmode) {
-  UNIMPLEMENTED();
+  switch (value.type()) {
+    case kWasmI32:
+      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+      break;
+    case kWasmI64: {
+      DCHECK(RelocInfo::IsNone(rmode));
+      int32_t low_word = value.to_i64();
+      int32_t high_word = value.to_i64() >> 32;
+      TurboAssembler::li(reg.low_gp(), Operand(low_word));
+      TurboAssembler::li(reg.high_gp(), Operand(high_word));
+      break;
+    }
+    case kWasmF32:
+      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_scalar());
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
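The kWasmI64 case above splits the constant across a register pair, since mips32 has no 64-bit general-purpose registers. A self-contained illustration of that split (plain standalone C++, independent of V8):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t value = 0x100000002LL;  // high word 1, low word 2
      int32_t low_word = static_cast<int32_t>(value);         // bits 0..31
      int32_t high_word = static_cast<int32_t>(value >> 32);  // bits 32..63
      std::printf("low=%d high=%d\n", low_word, high_word);   // low=2 high=1
      return 0;
    }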
 void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
                                        int size) {
-  UNIMPLEMENTED();
+  DCHECK_LE(offset, kMaxInt);
+  lw(dst, liftoff::GetContextOperand());
+  DCHECK_EQ(4, size);
+  lw(dst, MemOperand(dst, offset));
 }

-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+  sw(context, liftoff::GetContextOperand());
+}

-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+  lw(dst, liftoff::GetContextOperand());
+}
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
@@ -55,11 +91,29 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
 }

 void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
-  UNIMPLEMENTED();
+  // TODO(wasm): Extract the destination register from the CallDescriptor.
+  // TODO(wasm): Add multi-return support.
+  LiftoffRegister dst =
+      reg.is_pair()
+          ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
+          : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f0);
+  if (reg != dst) Move(dst, reg);
 }
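Aside: this matches the MIPS O32 return convention, where a 64-bit integer result travels in the v0/v1 pair (low half in v0 on little-endian targets) and a floating-point result in f0 — hence ForPair(v0, v1) for pairs, v0 for scalar integers, and f0 for FP values.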
 void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  UNIMPLEMENTED();
+  // The caller should check that the registers are not equal. For most
+  // occurrences, this is already guaranteed, so no need to check within this
+  // method.
+  DCHECK_NE(dst, src);
+  DCHECK_EQ(dst.reg_class(), src.reg_class());
+  if (src.is_pair()) {
+    TurboAssembler::Move(dst.low_gp(), src.low_gp());
+    TurboAssembler::Move(dst.high_gp(), src.high_gp());
+  } else if (src.is_gp()) {
+    TurboAssembler::mov(dst.gp(), src.gp());
+  } else {
+    TurboAssembler::Move(dst.fp(), src.fp());
+  }
 }

 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
@@ -165,7 +219,8 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
 void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

 void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
-  UNIMPLEMENTED();
+  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);  // 16 bit immediate
+  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
 }

 void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
......
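A note on the DCHECK in DropStackSlotsAndRet: DropAndRet pops the slots with a stack adjustment whose immediate field is 16 bits, and with kPointerSize = 4 on mips32 the bound (1 << 16) / kPointerSize works out to 16384 slots; the mips64 version below applies the same check with kPointerSize = 8, i.e. 8192 slots.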
@@ -11,23 +11,58 @@ namespace v8 {
 namespace internal {
 namespace wasm {

 namespace liftoff {
+
+// sp-8 holds the stack marker, sp-16 is the wasm context, first stack slot
+// is located at sp-24.
+constexpr int32_t kConstantStackSpace = 16;
+
+inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+
 }  // namespace liftoff

 void LiftoffAssembler::ReserveStackSpace(uint32_t stack_slots) {
-  UNIMPLEMENTED();
+  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+  DCHECK_LE(bytes, kMaxInt);
+  daddiu(sp, sp, -bytes);
 }
 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                     RelocInfo::Mode rmode) {
-  UNIMPLEMENTED();
+  switch (value.type()) {
+    case kWasmI32:
+      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+      break;
+    case kWasmI64:
+      TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+      break;
+    case kWasmF32:
+      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_scalar());
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
                                        int size) {
-  UNIMPLEMENTED();
+  DCHECK_LE(offset, kMaxInt);
+  ld(dst, liftoff::GetContextOperand());
+  DCHECK(size == 4 || size == 8);
+  if (size == 4) {
+    lw(dst, MemOperand(dst, offset));
+  } else {
+    ld(dst, MemOperand(dst, offset));
+  }
 }

-void LiftoffAssembler::SpillContext(Register context) { UNIMPLEMENTED(); }
+void LiftoffAssembler::SpillContext(Register context) {
+  sd(context, liftoff::GetContextOperand());
+}

-void LiftoffAssembler::FillContextInto(Register dst) { UNIMPLEMENTED(); }
+void LiftoffAssembler::FillContextInto(Register dst) {
+  ld(dst, liftoff::GetContextOperand());
+}
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
@@ -55,11 +90,22 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
 }

 void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
-  UNIMPLEMENTED();
+  LiftoffRegister dst =
+      reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f0);
+  if (reg != dst) Move(dst, reg);
 }
 void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
-  UNIMPLEMENTED();
+  // The caller should check that the registers are not equal. For most
+  // occurrences, this is already guaranteed, so no need to check within this
+  // method.
+  DCHECK_NE(dst, src);
+  DCHECK_EQ(dst.reg_class(), src.reg_class());
+  // TODO(ksreten): Handle different sizes here.
+  if (dst.is_gp()) {
+    TurboAssembler::Move(dst.gp(), src.gp());
+  } else {
+    TurboAssembler::Move(dst.fp(), src.fp());
+  }
 }

 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
@@ -165,7 +211,8 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }
 void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { UNIMPLEMENTED(); }

 void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
-  UNIMPLEMENTED();
+  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);
+  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
 }

 void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
......
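Since kLiftoffAssemblerImplementedOnThisPlatform remains false for both MIPS targets, the shared Liftoff pipeline still treats them as unsupported and falls back to the optimizing compiler. A hedged sketch of the kind of guard that flag enables (illustrative shape only; TryLiftoffCompile is a hypothetical name, not code from this CL):

    // Hypothetical bail-out: compilation entry points can key off the
    // platform flag until the port implements every bytecode.
    bool TryLiftoffCompile(/* function body, compilation env, ... */) {
      if (!kLiftoffAssemblerImplementedOnThisPlatform) return false;
      // ... drive LiftoffAssembler over the function's bytecodes ...
      return true;
    }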