Commit bc293c24 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Introduce LiftoffRegister type

LiftoffRegister is an abstraction over Register and DoubleRegister.
Many functions in Liftoff do not need to distinguish GP and FP
registers. LiftoffRegister allows to implement most functions
generically. Accessors allow to convert them back to Register or
DoubleRegister.
Both register types are represented in a unified index space, which
allows to extend this concept to more register types and implement
aliasing in a straight-forward manner.
Many functions currently only implement the GP case. FP will be added
later.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: I043b787bc09dd1a06506ad515eb325b8ea19746d
Reviewed-on: https://chromium-review.googlesource.com/793390
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49780}
parent ffcb51b2
......@@ -2075,9 +2075,11 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/baseline/liftoff-assembler-defs.h",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-assembler.h",
"src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/baseline/liftoff-register.h",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
......@@ -2186,7 +2188,6 @@ v8_source_set("v8_base") {
"src/ia32/sse-instr.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
......@@ -2201,7 +2202,6 @@ v8_source_set("v8_base") {
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
"src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
......@@ -2255,7 +2255,6 @@ v8_source_set("v8_base") {
"src/debug/arm/debug-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
......@@ -2301,7 +2300,6 @@ v8_source_set("v8_base") {
"src/debug/arm64/debug-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
if (use_jumbo_build) {
......@@ -2338,7 +2336,6 @@ v8_source_set("v8_base") {
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
......@@ -2368,7 +2365,6 @@ v8_source_set("v8_base") {
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
......@@ -2398,7 +2394,6 @@ v8_source_set("v8_base") {
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
......@@ -2428,7 +2423,6 @@ v8_source_set("v8_base") {
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
......
......@@ -1437,9 +1437,11 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
'wasm/baseline/liftoff-assembler-defs.h',
'wasm/baseline/liftoff-assembler.cc',
'wasm/baseline/liftoff-assembler.h',
'wasm/baseline/liftoff-compiler.cc',
'wasm/baseline/liftoff-register.h',
'wasm/compilation-manager.cc',
'wasm/compilation-manager.h',
'wasm/decoder.h',
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific Liftoff (wasm baseline compiler) definitions for arm.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
// Placeholder register mask; the port is not implemented yet (see TODO above).
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific Liftoff (wasm baseline compiler) definitions for arm64.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
// Placeholder register mask; the port is not implemented yet (see TODO above).
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific Liftoff (wasm baseline compiler) definitions for ia32.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
// GP registers Liftoff may use as cache registers. Note that esp and ebp are
// not in this list.
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
......@@ -34,17 +34,17 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
sub(esp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xor_(reg, reg);
xor_(reg.gp(), reg.gp());
} else {
mov(reg, Immediate(value.to_i32()));
mov(reg.gp(), Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
UNREACHABLE();
}
}
......@@ -60,46 +60,46 @@ void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {
LiftoffRegList pinned) {
Operand src_op = Operand(src_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register src = GetUnusedRegister(kGpReg, pinned);
Register src = GetUnusedRegister(kGpReg, pinned).gp();
mov(src, Immediate(offset_imm));
src_op = Operand(src_addr, src, times_1, 0);
}
DCHECK_EQ(4, size);
mov(dst, src_op);
mov(dst.gp(), src_op);
}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {
LiftoffRegister src, int size,
LiftoffRegList pinned) {
Operand dst_op = Operand(dst_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register dst = GetUnusedRegister(kGpReg, pinned);
Register dst = GetUnusedRegister(kGpReg, pinned).gp();
mov(dst, Immediate(offset_imm));
dst_op = Operand(dst_addr, dst, times_1, 0);
}
DCHECK_EQ(4, size);
mov(dst_op, src);
mov(dst_op, src.gp());
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
constexpr int32_t kCallerStackSlotSize = 4;
mov(dst, Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
mov(dst.gp(), Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(kGpReg);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
......@@ -108,13 +108,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
if (reg != eax) mov(eax, reg);
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
if (reg.gp() != eax) mov(eax, reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
mov(liftoff::GetStackSlot(index), reg);
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
// The caller should check that the registers are not equal. For most
// occurences, this is already guaranteed, so no need to check within this
// method.
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
// TODO(clemensh): Handle different sizes here.
if (dst.is_gp()) {
mov(dst.gp(), src.gp());
} else {
movsd(dst.fp(), src.fp());
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
mov(liftoff::GetStackSlot(index), reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
......@@ -122,9 +135,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
mov(reg, liftoff::GetStackSlot(index));
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
mov(reg.gp(), liftoff::GetStackSlot(index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#include "src/reglist.h"
// Per-architecture definitions for the Liftoff baseline compiler:
// - kLiftoffAssemblerImplementedOnThisPlatform: whether a Liftoff port
//   exists for the target architecture;
// - kLiftoffAssemblerGpCacheRegs / kLiftoffAssemblerFpCacheRegs: the
//   registers Liftoff may use for caching values.
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
// TODO(clemensh): Fix this once we support float operations.
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
// TODO(clemensh): Fix this once we support float operations.
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#else
// Fallback for architectures without a Liftoff port: placeholder masks,
// guarded by kLiftoffAssemblerImplementedOnThisPlatform == false.
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
This diff is collapsed.
This diff is collapsed.
......@@ -112,6 +112,33 @@ class LiftoffCompiler {
}
}
void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
DCHECK_EQ(kWasmI32, __ local_type(param_idx));
compiler::LinkageLocation param_loc =
call_desc_->GetInputLocation(input_location);
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
int reg_code = param_loc.AsRegister();
LiftoffRegister reg = LiftoffRegister(Register::from_code(reg_code));
if (kGpCacheRegList.has(reg)) {
// This is a cache register, just use it.
__ PushRegister(kWasmI32, reg);
return;
}
// No cache register. Push to the stack.
__ Spill(param_idx, reg);
__ cache_state()->stack_state.emplace_back(kWasmI32);
return;
}
if (param_loc.IsCallerFrameSlot()) {
LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg);
__ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
__ PushRegister(kWasmI32, tmp_reg);
return;
}
UNREACHABLE();
}
void StartFunctionBody(Decoder* decoder, Control* block) {
if (!kLiftoffAssemblerImplementedOnThisPlatform) {
unsupported(decoder, "platform");
......@@ -140,28 +167,8 @@ class LiftoffCompiler {
__ SpillContext(context_reg);
uint32_t param_idx = 0;
for (; param_idx < num_params; ++param_idx) {
constexpr uint32_t kFirstActualParamIndex = kContextParameterIndex + 1;
ValueType type = __ local_type(param_idx);
compiler::LinkageLocation param_loc =
call_desc_->GetInputLocation(param_idx + kFirstActualParamIndex);
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
Register param_reg = Register::from_code(param_loc.AsRegister());
if (param_reg.bit() & __ kGpCacheRegs) {
// This is a cache register, just use it.
__ PushRegister(type, param_reg);
} else {
// No cache register. Push to the stack.
__ Spill(param_idx, param_reg);
__ cache_state()->stack_state.emplace_back(type);
}
} else if (param_loc.IsCallerFrameSlot()) {
Register tmp_reg = __ GetUnusedRegister(reg_class_for(type));
__ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
__ PushRegister(type, tmp_reg);
} else {
UNIMPLEMENTED();
}
constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
}
for (; param_idx < __ num_locals(); ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
......@@ -258,11 +265,12 @@ class LiftoffCompiler {
}
#undef CASE_EMIT_FN
LiftoffAssembler::PinnedRegisterScope pinned_regs;
Register target_reg = pinned_regs.pin(__ GetBinaryOpTargetRegister(kGpReg));
Register rhs_reg = pinned_regs.pin(__ PopToRegister(kGpReg, pinned_regs));
Register lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
(asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
LiftoffRegList pinned;
LiftoffRegister target_reg =
pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
(asm_->*emit_fn)(target_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
__ PushRegister(kWasmI32, target_reg);
}
......@@ -298,7 +306,7 @@ class LiftoffCompiler {
// TODO(clemensh): Handle other types.
if (values[0].type != kWasmI32)
return unsupported(decoder, "non-i32 return");
Register reg = __ PopToRegister(kGpReg);
LiftoffRegister reg = __ PopToRegister(kGpReg);
__ MoveToReturnRegister(reg);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
......@@ -308,9 +316,10 @@ class LiftoffCompiler {
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
auto& slot = __ cache_state()->stack_state[operand.index];
DCHECK_EQ(slot.type(), operand.type);
switch (slot.loc()) {
case kRegister:
__ PushRegister(operand.type, slot.reg());
__ PushRegister(slot.type(), slot.reg());
break;
case kConstant:
__ cache_state()->stack_state.emplace_back(operand.type,
......@@ -318,14 +327,35 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(operand.type);
Register reg = __ GetUnusedRegister(rc);
LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, operand.index);
__ PushRegister(operand.type, reg);
} break;
__ PushRegister(slot.type(), reg);
break;
}
}
CheckStackSizeLimit(decoder);
}
void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
if (dst_slot.is_reg()) {
LiftoffRegister slot_reg = dst_slot.reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot.reg(), state.stack_height() - 1);
return;
}
state.dec_used(slot_reg);
}
ValueType type = dst_slot.type();
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1);
dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
void SetLocal(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
......@@ -340,27 +370,9 @@ class LiftoffCompiler {
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
case kStack: {
switch (target_slot.loc()) {
case kRegister:
if (state.register_use_count[target_slot.reg().code()] == 1) {
__ Fill(target_slot.reg(), state.stack_height() - 1);
break;
} else {
state.dec_used(target_slot.reg());
// and fall through to use a new register.
}
case kConstant:
case kStack: {
ValueType type = __ local_type(local_index);
Register target_reg = __ GetUnusedRegister(reg_class_for(type));
__ Fill(target_reg, state.stack_height() - 1);
target_slot = LiftoffAssembler::VarState(type, target_reg);
state.inc_used(target_reg);
} break;
}
case kStack:
SetLocalFromStackSlot(target_slot, local_index);
break;
}
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
......@@ -380,29 +392,30 @@ class LiftoffCompiler {
const auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32 && global->type != kWasmI64)
return unsupported(decoder, "non-int global");
LiftoffAssembler::PinnedRegisterScope pinned;
Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
LiftoffRegList pinned;
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
Register value =
pinned.pin(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
int size = 1 << ElementSizeLog2Of(global->type);
if (size > kPointerSize)
return unsupported(decoder, "global > kPointerSize");
__ Load(value, addr, global->offset, size, pinned);
__ PushRegister(global->type, value);
CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
LiftoffAssembler::PinnedRegisterScope pinned;
Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
LiftoffRegList pinned;
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
Register reg =
pinned.pin(__ PopToRegister(reg_class_for(global->type), pinned));
LiftoffRegister reg =
pinned.set(__ PopToRegister(reg_class_for(global->type), pinned));
int size = 1 << ElementSizeLog2Of(global->type);
__ Store(addr, global->offset, reg, size, pinned);
}
......@@ -425,7 +438,7 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kGpReg);
Register value = __ PopToRegister(kGpReg).gp();
__ JumpIfZero(value, &cont_false);
Br(decoder, target);
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
#define V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
#include <memory>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
enum RegClass { kNoReg, kGpReg, kFpReg };

// Maps a wasm value type to the register class holding it: int types live in
// gp registers, float types in fp registers; any other (unsupported) type
// maps to kNoReg.
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
  return (type == kWasmF32 || type == kWasmF64)
             ? kFpReg
             : (type == kWasmI32 || type == kWasmI64) ? kGpReg : kNoReg;
}
// Maximum code of a gp cache register, i.e. the index of the highest bit set
// in the gp cache register mask. "8 * sizeof(mask) - CountLeadingZeros(mask)"
// is the bit width of the mask (highest set bit index plus one), so subtract
// one to get the register code itself. (Without the "- 1" these constants
// would be one too large, contradicting their names and the comment below and
// wasting one slot per register class in the unified index space.)
static constexpr int kMaxGpRegCode =
    8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
    base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs) - 1;
// Maximum code of an fp cache register (same computation as above).
static constexpr int kMaxFpRegCode =
    8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
    base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs) - 1;
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kAfterMaxLiftoffGpRegCode .. kAfterMaxLiftoffGpRegCode + kMaxFpRegCode]
// encodes fp registers.
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
    kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
static_assert(kAfterMaxLiftoffRegCode < 256,
              "liftoff register codes can be stored in one uint8_t");
// Unified handle for a gp or an fp register. Both register kinds share one
// small integer code space (see the constants above), so generic Liftoff code
// can treat them uniformly; gp()/fp() convert back to the architecture
// register types.
class LiftoffRegister {
 public:
  // Wrap a gp register; its code is used as the liftoff code directly.
  explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
    DCHECK_EQ(reg, gp());
  }
  // Wrap an fp register; its code is offset past the gp code range.
  explicit LiftoffRegister(DoubleRegister reg)
      : LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
    DCHECK_EQ(reg, fp());
  }

  // Reconstruct a register from a code previously obtained via
  // liftoff_code().
  static LiftoffRegister from_liftoff_code(int code) {
    DCHECK_LE(0, code);
    DCHECK_GT(kAfterMaxLiftoffRegCode, code);
    return LiftoffRegister(code);
  }

  constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
  constexpr bool is_fp() const {
    return code_ >= kAfterMaxLiftoffGpRegCode &&
           code_ < kAfterMaxLiftoffFpRegCode;
  }

  // Convert back to the architecture register type. Only legal on a register
  // of the matching class (DCHECKed).
  Register gp() const {
    DCHECK(is_gp());
    return Register::from_code(code_);
  }
  DoubleRegister fp() const {
    DCHECK(is_fp());
    return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
  }

  int liftoff_code() const { return code_; }

  RegClass reg_class() const {
    DCHECK(is_gp() || is_fp());
    return is_gp() ? kGpReg : kFpReg;
  }

  // Compares the unified code, so a gp and an fp register never compare
  // equal.
  bool operator==(const LiftoffRegister other) const {
    return code_ == other.code_;
  }
  bool operator!=(const LiftoffRegister other) const {
    return code_ != other.code_;
  }

 private:
  // Unified code; fits in one byte per the static_assert on
  // kAfterMaxLiftoffRegCode.
  uint8_t code_;

  // Unchecked constructor; used by the checked entry points above.
  explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
              "LiftoffRegister can efficiently be passed by value");
// A bit set of LiftoffRegisters over the unified code space (used e.g. to
// track pinned registers). The storage type is the smallest unsigned integer
// that can hold kAfterMaxLiftoffRegCode bits.
class LiftoffRegList {
 public:
  static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
  static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
  using storage_t = std::conditional<
      use_u16, uint16_t,
      std::conditional<use_u32, uint32_t, uint64_t>::type>::type;

  // Masks of all gp resp. fp cache registers, in the unified encoding.
  static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
  static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
                                       << kAfterMaxLiftoffGpRegCode;

  constexpr LiftoffRegList() = default;

  // Add a register to the list. The register is returned to allow chaining,
  // e.g. `pinned.set(GetUnusedRegister(...))`.
  Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
  DoubleRegister set(DoubleRegister reg) {
    return set(LiftoffRegister(reg)).fp();
  }
  LiftoffRegister set(LiftoffRegister reg) {
    regs_ |= storage_t{1} << reg.liftoff_code();
    return reg;
  }

  // Remove a register from the list; returns the register for chaining.
  LiftoffRegister clear(LiftoffRegister reg) {
    regs_ &= ~(storage_t{1} << reg.liftoff_code());
    return reg;
  }

  bool has(LiftoffRegister reg) const {
    return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
  }

  bool is_empty() const { return regs_ == 0; }

  LiftoffRegList operator&(LiftoffRegList other) const {
    return FromBits(regs_ & other.regs_);
  }

  // Complement restricted to the cache registers; never sets bits outside
  // kGpMask | kFpMask.
  LiftoffRegList operator~() const {
    return FromBits(~regs_ & (kGpMask | kFpMask));
  }

  // Returns the contained register with the smallest liftoff code. The list
  // must not be empty.
  LiftoffRegister GetFirstRegSet() const {
    DCHECK_NE(0, regs_);
    unsigned first_code = base::bits::CountTrailingZeros(regs_);
    return LiftoffRegister::from_liftoff_code(first_code);
  }

  LiftoffRegList MaskOut(storage_t mask) const {
    // Masking out is guaranteed to return a correct reg list, hence no checks
    // needed.
    return FromBits(regs_ & ~mask);
  }

  static LiftoffRegList FromBits(storage_t bits) {
    DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
    return LiftoffRegList(bits);
  }

  // Compile-time checked variant of FromBits.
  template <storage_t bits>
  static constexpr LiftoffRegList FromBits() {
    static_assert(bits == (bits & (kGpMask | kFpMask)), "illegal reg list");
    return LiftoffRegList(bits);
  }

 private:
  storage_t regs_ = 0;

  // Unchecked constructor. Only use for valid bits.
  explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegList),
              "LiftoffRegList can be passed by value");
// All gp resp. fp cache registers as a LiftoffRegList.
static constexpr LiftoffRegList kGpCacheRegList =
    LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
static constexpr LiftoffRegList kFpCacheRegList =
    LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();

// Returns the cache register list for the given register class.
static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
  return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific Liftoff (wasm baseline compiler) definitions for mips.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
// Placeholder register mask; the port is not implemented yet (see TODO above).
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Platform-specific Liftoff (wasm baseline compiler) definitions for mips64.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
// Placeholder register mask; the port is not implemented yet (see TODO above).
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
......@@ -34,17 +34,17 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
subl(rsp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xorl(reg, reg);
xorl(reg.gp(), reg.gp());
} else {
movl(reg, Immediate(value.to_i32()));
movl(reg.gp(), Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
UNREACHABLE();
}
}
......@@ -64,54 +64,55 @@ void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {
LiftoffRegList pinned) {
Operand src_op = Operand(src_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register src = GetUnusedRegister(kGpReg, pinned);
Register src = GetUnusedRegister(kGpReg, pinned).gp();
movl(src, Immediate(offset_imm));
src_op = Operand(src_addr, src, times_1, 0);
}
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, src_op);
movl(dst.gp(), src_op);
} else {
movq(dst, src_op);
movq(dst.gp(), src_op);
}
}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {
LiftoffRegister src, int size,
LiftoffRegList pinned) {
Operand dst_op = Operand(dst_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register dst = GetUnusedRegister(kGpReg, pinned);
Register dst = GetUnusedRegister(kGpReg, pinned).gp();
movl(dst, Immediate(offset_imm));
dst_op = Operand(dst_addr, dst, times_1, 0);
}
DCHECK(size == 4 || size == 8);
if (src.is_fp()) UNIMPLEMENTED();
if (size == 4) {
movl(dst_op, src);
movl(dst_op, src.gp());
} else {
movp(dst_op, src);
movp(dst_op, src.gp());
}
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
constexpr int32_t kStackSlotSize = 8;
movl(dst, Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
movl(dst.gp(), Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(kGpReg);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
......@@ -120,14 +121,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
// TODO(clemensh): Handle different types here.
if (reg != rax) movl(rax, reg);
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
if (reg.gp() != rax) movl(rax, reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
movl(liftoff::GetStackSlot(index), reg);
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
// The caller should check that the registers are not equal. For most
// occurences, this is already guaranteed, so no need to check within this
// method.
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
// TODO(clemensh): Handle different sizes here.
if (dst.is_gp()) {
movq(dst.gp(), src.gp());
} else {
movsd(dst.fp(), src.fp());
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
movl(liftoff::GetStackSlot(index), reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
......@@ -135,9 +148,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
movl(reg, liftoff::GetStackSlot(index));
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
movl(reg.gp(), liftoff::GetStackSlot(index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment