Commit ba330c3e authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add support for i64 constants, params and returns

This adds support for the {i64.const} opcode. Since this makes i64
values show up on the wasm stack, quite some code paths need to handle
them. The {CheckSupportedType} method still returns false for kWasmI64,
which will be changed in a follow-up CL. That requires more changes
since it unlocks more uses of i64, e.g. in loads and stores.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ie012d0cd3db001f8693573fd16a3cfafe187009b
Reviewed-on: https://chromium-review.googlesource.com/893319
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51105}
parent f5ee2ccf
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -23,8 +23,13 @@ constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
// Returns the ebp-relative operand addressing wasm stack slot {index}.
// Fix: the old single-expression return statement was left above the new
// body, making the offset computation below it dead code; keep only the new
// form (both compute the same address).
inline Operand GetStackSlot(uint32_t index) {
  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
  return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// Returns the ebp-relative operand addressing one half of a wasm stack slot;
// {half_index} counts halves, so halves 2*i and 2*i+1 belong to slot i.
inline Operand GetHalfStackSlot(uint32_t half_index) {
  constexpr int32_t kHalfSlotSize = LiftoffAssembler::kStackSlotSize / 2;
  int32_t offset = half_index * kHalfSlotSize;
  return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
......@@ -58,6 +63,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
reg.gp(),
Immediate(reinterpret_cast<Address>(value.to_i32()), rmode));
break;
case kWasmI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
case kWasmF32: {
Register tmp = GetUnusedRegister(kGpReg).gp();
mov(tmp, Immediate(value.to_f32_boxed().get_bits()));
......@@ -206,7 +219,9 @@ void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
reg.is_pair()
? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
: reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
if (reg != dst) Move(dst, reg);
}
......@@ -216,8 +231,10 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
// method.
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
// TODO(clemensh): Handle different sizes here.
if (dst.is_gp()) {
if (src.is_pair()) {
if (dst.low_gp() != src.low_gp()) mov(dst.low_gp(), src.low_gp());
if (dst.high_gp() != src.high_gp()) mov(dst.high_gp(), src.high_gp());
} else if (dst.is_gp()) {
mov(dst.gp(), src.gp());
} else {
movsd(dst.fp(), src.fp());
......@@ -231,6 +248,10 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
case kWasmI32:
mov(dst, reg.gp());
break;
case kWasmI64:
mov(dst, reg.low_gp());
mov(liftoff::GetHalfStackSlot(2 * index + 1), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
break;
......@@ -263,6 +284,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
case kWasmI32:
mov(reg.gp(), src);
break;
case kWasmI64:
mov(reg.low_gp(), src);
mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
break;
case kWasmF32:
movss(reg.fp(), src);
break;
......@@ -274,6 +299,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
// Loads one 32-bit half of a spilled i64 value into {reg}. {half_index} is a
// half-slot index as used by {liftoff::GetHalfStackSlot}, i.e. two halves per
// stack slot.
void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
  mov(reg, liftoff::GetHalfStackSlot(half_index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
lea(dst, Operand(lhs, rhs, times_1, 0));
......@@ -502,14 +531,18 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
DCHECK_NE(kWasmF64, src.type()); // TODO(clemensh): Implement this.
push(liftoff::GetStackSlot(src_index));
push(liftoff::GetHalfStackSlot(2 * src_index + half == kLowWord ? 0 : 1));
break;
case VarState::kRegister:
PushCallerFrameSlot(src.reg());
PushCallerFrameSlot(
src.type() == kWasmI64
? (half == kLowWord ? src.reg().low() : src.reg().high())
: src.reg());
break;
case VarState::kI32Const:
push(Immediate(src.i32_const()));
......
......@@ -37,17 +37,32 @@ class StackTransferRecipe {
: dst(dst), src(src), type(type) {}
};
struct RegisterLoad {
enum LoadKind : uint8_t {
kConstant, // load a constant value into a register.
kStack, // fill a register from a stack slot.
kHalfStack // fill one half of a register pair from half a stack slot.
};
LiftoffRegister dst;
bool is_constant_load; // otherwise load it from the stack.
LoadKind kind;
ValueType type;
uint32_t value; // i32 constant if {is_constant_load}, else stack slot.
RegisterLoad(LiftoffRegister dst, WasmValue constant)
: dst(dst),
is_constant_load(true),
type(kWasmI32),
value(constant.to_i32()) {}
RegisterLoad(LiftoffRegister dst, uint32_t stack_slot, ValueType type)
: dst(dst), is_constant_load(false), type(type), value(stack_slot) {}
uint32_t value; // i32 constant value or stack index, depending on kind.
// Named constructors.
static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
return {dst, kConstant, kWasmI32, constant.to_u32()};
}
static RegisterLoad Stack(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
return {dst, kStack, type, stack_index};
}
static RegisterLoad HalfStack(LiftoffRegister dst,
uint32_t half_stack_index) {
return {dst, kHalfStack, kWasmI32, half_stack_index};
}
private:
RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
uint32_t value)
: dst(dst), kind(kind), type(type), value(value) {}
};
public:
......@@ -111,10 +126,17 @@ class StackTransferRecipe {
}
for (RegisterLoad& rl : register_loads_) {
if (rl.is_constant_load) {
asm_->LoadConstant(rl.dst, WasmValue(rl.value));
} else {
asm_->Fill(rl.dst, rl.value, rl.type);
switch (rl.kind) {
case RegisterLoad::kConstant:
asm_->LoadConstant(rl.dst, WasmValue(rl.value));
break;
case RegisterLoad::kStack:
asm_->Fill(rl.dst, rl.value, rl.type);
break;
case RegisterLoad::kHalfStack:
// As half of a register pair, {rl.dst} must be a gp register.
asm_->FillI64Half(rl.dst.gp(), rl.value);
break;
}
}
register_loads_.clear();
......@@ -166,10 +188,41 @@ class StackTransferRecipe {
}
}
// Loads the given {half} (low or high word) of the i64 value described by
// {src}, which lives at wasm stack index {index}, into the gp register {dst}.
void LoadI64HalfIntoRegister(LiftoffRegister dst,
                             const LiftoffAssembler::VarState& src,
                             uint32_t index, RegPairHalf half) {
  // Use CHECK such that the remaining code is statically dead if
  // {kNeedI64RegPair} is false.
  CHECK(kNeedI64RegPair);
  DCHECK_EQ(kWasmI64, src.type());
  switch (src.loc()) {
    case VarState::kStack:
      // Fix: parenthesize the conditional. Without parentheses this parsed as
      // {(2 * index + half) == kLowWord ? 0 : 1}, which selects the wrong
      // half slot for every index > 0. The low word lives at half slot
      // 2 * index, the high word at 2 * index + 1 (matching Spill/Fill).
      LoadI64HalfStackSlot(dst, 2 * index + (half == kLowWord ? 0 : 1));
      break;
    case VarState::kRegister: {
      LiftoffRegister src_half =
          half == kLowWord ? src.reg().low() : src.reg().high();
      if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
      break;
    }
    case VarState::kI32Const: {
      // The high word of a constant is taken as 0 here — NOTE(review):
      // confirm whether i64 constants require sign extension of the i32
      // constant instead.
      int32_t value = half == kLowWord ? src.i32_const() : 0;
      LoadConstant(dst, WasmValue(value));
      break;
    }
  }
}
void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_EQ(reg_class_for(type), src.reg_class());
if (src.is_pair()) {
DCHECK_EQ(kWasmI64, type);
if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
if (dst.high() != src.high())
MoveRegister(dst.high(), src.high(), kWasmI32);
return;
}
DCHECK(!move_dst_regs_.has(dst));
move_dst_regs_.set(dst);
move_src_regs_.set(src);
......@@ -177,12 +230,16 @@ class StackTransferRecipe {
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
register_loads_.emplace_back(dst, value);
register_loads_.push_back(RegisterLoad::Const(dst, value));
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
register_loads_.emplace_back(dst, stack_index, type);
register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
}
// Records a deferred fill of one half of an i64 value from a half stack slot
// into {dst} (one gp register of a register pair). {half_stack_index} indexes
// halves, i.e. 2 * stack_index + 0/1.
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
  register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
}
uint32_t max_used_spill_slot() const { return max_used_spill_slot_; }
......@@ -400,10 +457,6 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
uint32_t* max_used_spill_slot,
Register* target) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Parameter 0 is the wasm context.
constexpr size_t kFirstActualParameter = 1;
DCHECK_EQ(kFirstActualParameter + num_params, call_desc->ParameterCount());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
......@@ -426,25 +479,40 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// in the correct order.
LiftoffRegList param_regs;
uint32_t param_base = cache_state_.stack_height() - num_params;
uint32_t call_desc_input_idx = static_cast<uint32_t>(call_desc->InputCount());
for (uint32_t i = num_params; i > 0; --i) {
uint32_t param = i - 1;
ValueType type = sig->GetParam(param);
RegClass rc = reg_class_for(type);
compiler::LinkageLocation loc = call_desc->GetInputLocation(
param + kFirstActualParameter + kInputShift);
const int num_lowered_params = kNeedI64RegPair && type == kWasmI64 ? 2 : 1;
uint32_t stack_idx = param_base + param;
const VarState& slot = cache_state_.stack_state[stack_idx];
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
param_regs.set(reg);
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
} else {
DCHECK(loc.IsCallerFrameSlot());
PushCallerFrameSlot(slot, stack_idx);
// Process both halfs of register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
--call_desc_input_idx;
compiler::LinkageLocation loc =
call_desc->GetInputLocation(call_desc_input_idx);
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
RegClass rc = num_lowered_params == 2 ? kGpReg : reg_class_for(type);
LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
param_regs.set(reg);
if (num_lowered_params == 1) {
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
} else {
stack_transfers.LoadI64HalfIntoRegister(
reg, slot, stack_idx, lowered_idx == 0 ? kHighWord : kLowWord);
}
} else {
DCHECK(loc.IsCallerFrameSlot());
PushCallerFrameSlot(slot, stack_idx,
lowered_idx == 0 ? kHighWord : kLowWord);
}
}
}
// {call_desc_input_idx} should point after the context parameter now.
DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
compiler::LinkageLocation context_loc =
call_desc->GetInputLocation(kInputShift);
......@@ -516,8 +584,14 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto* slot = &cache_state_.stack_state[idx];
if (!slot->is_reg() || slot->reg() != reg) continue;
Spill(idx, reg, slot->type());
if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
if (slot->reg().is_pair()) {
// Make sure to decrement *both* registers in a pair, because the
// {clear_used} call below only clears one of them.
cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high());
}
Spill(idx, slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
......
......@@ -115,6 +115,11 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t stack_base = 0;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList available_regs =
kGpCacheRegList & ~used_registers & ~pinned;
return available_regs.GetNumRegsSet() >= 2;
}
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return has_unused_register(candidates, pinned);
......@@ -128,9 +133,14 @@ class LiftoffAssembler : public TurboAssembler {
// Returns a currently unused register of class {rc}, avoiding registers in
// {pinned}; does not mark the returned register as used. For {kGpRegPair} on
// 32-bit targets, picks two distinct unused gp registers and combines them.
// Fix: the stale pre-change statement {return unused_register(candidates);}
// was left above the corrected call, so {pinned} was silently ignored for
// plain gp/fp requests.
LiftoffRegister unused_register(RegClass rc,
                                LiftoffRegList pinned = {}) const {
  if (kNeedI64RegPair && rc == kGpRegPair) {
    LiftoffRegister low = pinned.set(unused_register(kGpReg, pinned));
    LiftoffRegister high = unused_register(kGpReg, pinned);
    return LiftoffRegister::ForPair(low, high);
  }
  DCHECK(rc == kGpReg || rc == kFpReg);
  LiftoffRegList candidates = GetCacheRegList(rc);
  return unused_register(candidates, pinned);
}
LiftoffRegister unused_register(LiftoffRegList candidates,
......@@ -164,10 +174,7 @@ class LiftoffAssembler : public TurboAssembler {
}
bool is_used(LiftoffRegister reg) const {
if (reg.is_pair()) {
DCHECK_EQ(is_used(reg.low()), is_used(reg.high()));
reg = reg.low();
}
if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
bool used = used_registers.has(reg);
DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
......@@ -255,6 +262,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister high = GetUnusedRegister(candidates, pinned);
return LiftoffRegister::ForPair(low, high);
}
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
return GetUnusedRegister(candidates, pinned);
}
......@@ -323,6 +331,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
inline void FillI64Half(Register, uint32_t half_index);
// i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
......@@ -367,7 +376,8 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index);
inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index,
RegPairHalf half);
inline void PushCallerFrameSlot(LiftoffRegister reg);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
......
......@@ -616,7 +616,10 @@ class LiftoffCompiler {
}
// Handles the {i64.const} opcode: loads the constant into a fresh register
// (a gp register pair on 32-bit targets, see {reg_class_for}) and pushes it
// onto the wasm value stack.
// Fix: removed the leftover {unsupported(decoder, "i64.const");} line from
// the pre-change version, which would have bailed out despite the opcode now
// being implemented below it.
void I64Const(Decoder* decoder, Value* result, int64_t value) {
  LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
  __ LoadConstant(reg, WasmValue(value));
  __ PushRegister(kWasmI64, reg);
  CheckStackSizeLimit(decoder);
}
void F32Const(Decoder* decoder, Value* result, float value) {
......@@ -996,6 +999,10 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_desc =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
if (kPointerSize == 4) {
call_desc =
compiler::GetI32WasmCallDescriptor(compilation_zone_, call_desc);
}
uint32_t max_used_spill_slot = 0;
__ PrepareCall(operand.sig, call_desc, &max_used_spill_slot);
......@@ -1117,6 +1124,10 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_desc =
compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
if (kPointerSize == 4) {
call_desc =
compiler::GetI32WasmCallDescriptor(compilation_zone_, call_desc);
}
uint32_t max_used_spill_slot = 0;
__ CallIndirect(operand.sig, call_desc, scratch.gp(), &max_used_spill_slot);
......
......@@ -18,7 +18,7 @@ namespace wasm {
static constexpr bool kNeedI64RegPair = kPointerSize == 4;
enum RegClass {
enum RegClass : uint8_t {
kGpReg,
kFpReg,
// {kGpRegPair} equals {kNoReg} if {kNeedI64RegPair} is false.
......@@ -26,6 +26,8 @@ enum RegClass {
kNoReg = kGpRegPair + kNeedI64RegPair
};
enum RegPairHalf : uint8_t { kLowWord, kHighWord };
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
......@@ -58,10 +60,11 @@ static constexpr int kBitsPerLiftoffRegCode =
32 - base::bits::CountLeadingZeros<uint32_t>(kAfterMaxLiftoffRegCode - 1);
static constexpr int kBitsPerGpRegCode =
32 - base::bits::CountLeadingZeros<uint32_t>(kMaxGpRegCode);
static constexpr int kBitsPerGpRegPair = 1 + 2 * kBitsPerGpRegCode;
class LiftoffRegister {
static constexpr int needed_bits =
kNeedI64RegPair ? 1 + 2 * kBitsPerGpRegCode : kBitsPerLiftoffRegCode;
Max(kNeedI64RegPair ? kBitsPerGpRegPair : 0, kBitsPerLiftoffRegCode);
using storage_t = std::conditional<
needed_bits <= 8, uint8_t,
std::conditional<needed_bits <= 16, uint16_t, uint32_t>::type>::type;
......@@ -96,11 +99,11 @@ class LiftoffRegister {
}
}
// Combines two distinct gp registers into one i64 register-pair value. The
// low and high gp codes are packed side by side, and the bit at
// {2 * kBitsPerGpRegCode} tags the resulting code as a pair.
// Fix: the old signature and body lines (using reg1/reg2 and liftoff codes)
// were left interleaved with the new ones, producing invalid C++ with two
// opening braces and references to undeclared names.
static LiftoffRegister ForPair(LiftoffRegister low, LiftoffRegister high) {
  DCHECK(kNeedI64RegPair);
  DCHECK_NE(low, high);
  storage_t combined_code = low.gp().code() |
                            high.gp().code() << kBitsPerGpRegCode |
                            1 << (2 * kBitsPerGpRegCode);
  return LiftoffRegister(combined_code);
}
......
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -76,6 +76,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
UNIMPLEMENTED();
}
// Fills one half of an i64 register pair from a half-sized stack slot.
// Unreachable in this port: i64 values are not split into register pairs
// here yet — NOTE(review): confirm once i64 support lands for this platform.
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
......@@ -147,7 +151,8 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index,
RegPairHalf half) {
UNIMPLEMENTED();
}
......
......@@ -284,6 +284,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
// Filling one half of an i64 register pair is never needed on this 64-bit
// target: i64 values fit into a single register ({kNeedI64RegPair} is only
// true when kPointerSize == 4).
void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  UNREACHABLE();
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs != dst) {
leal(dst, Operand(lhs, rhs, times_1, 0));
......@@ -502,7 +506,7 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index) {
uint32_t src_index, RegPairHalf) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment