Commit bc293c24 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Introduce LiftoffRegister type

LiftoffRegister is an abstraction over Register and DoubleRegister.
Many functions in Liftoff do not need to distinguish GP and FP
registers, so LiftoffRegister lets most of them be implemented
generically. Accessors convert a LiftoffRegister back to a Register or
DoubleRegister.
Both register types are represented in a unified index space, which
makes it straightforward to extend this concept to more register types
and to implement aliasing.
Many functions currently implement only the GP case; FP support will
be added later.
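
A minimal sketch of the intended usage, based on the accessors in the
new liftoff-register.h below (the concrete ia32 registers are
arbitrary examples):

  LiftoffRegister r0(eax);      // gp register; liftoff code == eax.code()
  LiftoffRegister r1(xmm1);     // fp register; code lives past the gp range
  Register gp = r0.gp();        // convert back to Register
  DoubleRegister fp = r1.fp();  // convert back to DoubleRegister
  LiftoffRegList pinned;
  pinned.set(r0);
  pinned.set(r1);               // gp and fp registers share one bit set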

R=titzer@chromium.org

Bug: v8:6600
Change-Id: I043b787bc09dd1a06506ad515eb325b8ea19746d
Reviewed-on: https://chromium-review.googlesource.com/793390
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49780}
parent ffcb51b2
......@@ -2075,9 +2075,11 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/baseline/liftoff-assembler-defs.h",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-assembler.h",
"src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/baseline/liftoff-register.h",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
......@@ -2186,7 +2188,6 @@ v8_source_set("v8_base") {
"src/ia32/sse-instr.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
......@@ -2201,7 +2202,6 @@ v8_source_set("v8_base") {
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
"src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
......@@ -2255,7 +2255,6 @@ v8_source_set("v8_base") {
"src/debug/arm/debug-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
......@@ -2301,7 +2300,6 @@ v8_source_set("v8_base") {
"src/debug/arm64/debug-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
if (use_jumbo_build) {
......@@ -2338,7 +2336,6 @@ v8_source_set("v8_base") {
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
......@@ -2368,7 +2365,6 @@ v8_source_set("v8_base") {
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
......@@ -2398,7 +2394,6 @@ v8_source_set("v8_base") {
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
......@@ -2428,7 +2423,6 @@ v8_source_set("v8_base") {
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
......
......@@ -1437,9 +1437,11 @@
'visitors.h',
'vm-state-inl.h',
'vm-state.h',
'wasm/baseline/liftoff-assembler-defs.h',
'wasm/baseline/liftoff-assembler.cc',
'wasm/baseline/liftoff-assembler.h',
'wasm/baseline/liftoff-compiler.cc',
'wasm/baseline/liftoff-register.h',
'wasm/compilation-manager.cc',
'wasm/compilation-manager.h',
'wasm/decoder.h',
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
......@@ -34,17 +34,17 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
sub(esp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xor_(reg, reg);
xor_(reg.gp(), reg.gp());
} else {
mov(reg, Immediate(value.to_i32()));
mov(reg.gp(), Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
UNREACHABLE();
}
}
......@@ -60,46 +60,46 @@ void LiftoffAssembler::SpillContext(Register context) {
mov(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {
LiftoffRegList pinned) {
Operand src_op = Operand(src_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register src = GetUnusedRegister(kGpReg, pinned);
Register src = GetUnusedRegister(kGpReg, pinned).gp();
mov(src, Immediate(offset_imm));
src_op = Operand(src_addr, src, times_1, 0);
}
DCHECK_EQ(4, size);
mov(dst, src_op);
mov(dst.gp(), src_op);
}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {
LiftoffRegister src, int size,
LiftoffRegList pinned) {
Operand dst_op = Operand(dst_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register dst = GetUnusedRegister(kGpReg, pinned);
Register dst = GetUnusedRegister(kGpReg, pinned).gp();
mov(dst, Immediate(offset_imm));
dst_op = Operand(dst_addr, dst, times_1, 0);
}
DCHECK_EQ(4, size);
mov(dst_op, src);
mov(dst_op, src.gp());
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
constexpr int32_t kCallerStackSlotSize = 4;
mov(dst, Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
mov(dst.gp(), Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(kGpReg);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
......@@ -108,13 +108,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
if (reg != eax) mov(eax, reg);
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
if (reg.gp() != eax) mov(eax, reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
mov(liftoff::GetStackSlot(index), reg);
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
// The caller should check that the registers are not equal. For most
// occurrences, this is already guaranteed, so no need to check within this
// method.
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
// TODO(clemensh): Handle different sizes here.
if (dst.is_gp()) {
mov(dst.gp(), src.gp());
} else {
movsd(dst.fp(), src.fp());
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
mov(liftoff::GetStackSlot(index), reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
......@@ -122,9 +135,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
mov(reg, liftoff::GetStackSlot(index));
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
mov(reg.gp(), liftoff::GetStackSlot(index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#include "src/reglist.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
// TODO(clemensh): Fix this once we support float operations.
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
// TODO(clemensh): Fix this once we support float operations.
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#else
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
......@@ -29,20 +29,21 @@ namespace {
class StackTransferRecipe {
struct RegisterMove {
Register dst;
Register src;
constexpr RegisterMove(Register dst, Register src) : dst(dst), src(src) {}
LiftoffRegister dst;
LiftoffRegister src;
constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src)
: dst(dst), src(src) {}
};
struct RegisterLoad {
Register dst;
LiftoffRegister dst;
bool is_constant_load; // otherwise load it from the stack.
union {
uint32_t stack_slot;
WasmValue constant;
};
RegisterLoad(Register dst, WasmValue constant)
RegisterLoad(LiftoffRegister dst, WasmValue constant)
: dst(dst), is_constant_load(true), constant(constant) {}
RegisterLoad(Register dst, uint32_t stack_slot)
RegisterLoad(LiftoffRegister dst, uint32_t stack_slot)
: dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
};
......@@ -54,18 +55,16 @@ class StackTransferRecipe {
// First, execute register moves. Then load constants and stack values into
// registers.
if ((move_dst_regs & move_src_regs) == 0) {
if ((move_dst_regs & move_src_regs).is_empty()) {
// No overlap in src and dst registers. Just execute the moves in any
// order.
for (RegisterMove& rm : register_moves) asm_->Move(rm.dst, rm.src);
register_moves.clear();
} else {
// Keep use counters of src registers.
constexpr size_t kRegArrSize =
LiftoffAssembler::CacheState::kMaxRegisterCode + 1;
uint32_t src_reg_use_count[kRegArrSize] = {0};
uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
for (RegisterMove& rm : register_moves) {
++src_reg_use_count[rm.src.code()];
++src_reg_use_count[rm.src.liftoff_code()];
}
// Now repeatedly iterate the list of register moves, and execute those
// whose dst register does not appear as src any more. The remaining moves
......@@ -77,11 +76,11 @@ class StackTransferRecipe {
while (!register_moves.empty()) {
int executed_moves = 0;
for (auto& rm : register_moves) {
if (src_reg_use_count[rm.dst.code()] == 0) {
if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
asm_->Move(rm.dst, rm.src);
++executed_moves;
DCHECK_LT(0, src_reg_use_count[rm.src.code()]);
--src_reg_use_count[rm.src.code()];
DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
--src_reg_use_count[rm.src.liftoff_code()];
} else if (executed_moves) {
// Compaction: Move not-executed moves to the beginning of the list.
(&rm)[-executed_moves] = rm;
......@@ -89,17 +88,17 @@ class StackTransferRecipe {
}
if (executed_moves == 0) {
// There is a cycle. Spill one register, then continue.
Register spill_reg = register_moves.back().src;
LiftoffRegister spill_reg = register_moves.back().src;
asm_->Spill(next_spill_slot, spill_reg);
// Remember to reload into the destination register later.
LoadStackSlot(register_moves.back().dst, next_spill_slot);
DCHECK_EQ(1, src_reg_use_count[spill_reg.code()]);
src_reg_use_count[spill_reg.code()] = 0;
DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
src_reg_use_count[spill_reg.liftoff_code()] = 0;
++next_spill_slot;
executed_moves = 1;
}
constexpr RegisterMove dummy(no_reg, no_reg);
register_moves.resize(register_moves.size() - executed_moves, dummy);
register_moves.erase(register_moves.end() - executed_moves,
register_moves.end());
}
}
......@@ -134,46 +133,53 @@ class StackTransferRecipe {
}
break;
case VarState::kRegister:
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst.reg(), src_index);
break;
case VarState::kRegister:
if (dst.reg() != src.reg()) MoveRegister(dst.reg(), src.reg());
LoadIntoRegister(dst.reg(), src, src_index);
break;
case VarState::kConstant:
LoadConstant(dst.reg(), WasmValue(src.i32_const()));
DCHECK_EQ(dst, src);
break;
}
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
LiftoffRegList move_dst_regs;
LiftoffRegList move_src_regs;
LiftoffAssembler* const asm_;
void LoadIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_index);
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
MoveRegister(dst, src.reg());
break;
case VarState::kConstant:
DCHECK_EQ(dst, src);
LoadConstant(dst, WasmValue(src.i32_const()));
break;
}
}
void MoveRegister(Register dst, Register src) {
DCHECK_EQ(0, move_dst_regs & dst.bit());
move_dst_regs |= dst.bit();
move_src_regs |= src.bit();
void MoveRegister(LiftoffRegister dst, LiftoffRegister src) {
DCHECK(!move_dst_regs.has(dst));
move_dst_regs.set(dst);
move_src_regs.set(src);
register_moves.emplace_back(dst, src);
}
void LoadConstant(Register dst, WasmValue value) {
void LoadConstant(LiftoffRegister dst, WasmValue value) {
register_loads.emplace_back(dst, value);
}
void LoadStackSlot(Register dst, uint32_t stack_index) {
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index) {
register_loads.emplace_back(dst, stack_index);
}
private:
// TODO(clemensh): Avoid unconditionally allocating on the heap.
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
RegList move_dst_regs = 0;
RegList move_src_regs = 0;
LiftoffAssembler* const asm_;
};
} // namespace
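
The move-execution loop above counts, for each register, how many
pending moves still read it as a source, repeatedly retires moves whose
destination is no longer read, and breaks pure cycles by spilling one
source. A standalone sketch of the same technique, using plain ints as
register codes (an illustration under those assumptions, not V8 code):

#include <utility>
#include <vector>

struct Move { int dst, src; };

// Executes all register-to-register moves without clobbering a source
// that is still needed. Cycles are broken by spilling one source; the
// returned (dst, stack_slot) pairs are reloads to perform after all
// moves are done (the role of the RegisterLoad list above).
template <typename MoveFn, typename SpillFn>
std::vector<std::pair<int, int>> ScheduleMoves(std::vector<Move> moves,
                                               int num_regs, MoveFn do_move,
                                               SpillFn spill) {
  std::vector<int> src_uses(num_regs, 0);
  for (const Move& m : moves) ++src_uses[m.src];
  std::vector<std::pair<int, int>> deferred_reloads;
  int next_slot = 0;
  while (!moves.empty()) {
    size_t executed = 0;
    for (size_t i = 0; i < moves.size(); ++i) {
      if (src_uses[moves[i].dst] == 0) {
        // The destination is not read by any pending move: execute now.
        do_move(moves[i].dst, moves[i].src);
        --src_uses[moves[i].src];
        ++executed;
      } else if (executed) {
        moves[i - executed] = moves[i];  // compact the pending moves
      }
    }
    if (executed == 0) {
      // Every destination is still a pending source: a cycle. Spill one
      // source; reload it into its destination after all moves ran.
      const Move& m = moves.back();
      spill(next_slot, m.src);
      deferred_reloads.emplace_back(m.dst, next_slot);
      src_uses[m.src] = 0;
      ++next_slot;
      executed = 1;
    }
    moves.resize(moves.size() - executed);
  }
  return deferred_reloads;
}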
......@@ -199,11 +205,13 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
for (; src_idx < src_end; ++src_idx, ++dst_idx) {
auto& dst = stack_state[dst_idx];
auto& src = source.stack_state[src_idx];
Register reg = no_reg;
// Just initialize to any register; will be overwritten before use.
LiftoffRegister reg(Register::from_code<0>());
RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
if (src.is_reg() && is_free(src.reg())) {
reg = src.reg();
} else if (has_unused_register()) {
reg = unused_register();
} else if (has_unused_register(rc)) {
reg = unused_register(rc);
} else {
// Make this a stack slot.
DCHECK(src.is_stack());
......@@ -237,7 +245,8 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
continue;
}
}
last_spilled_reg = source.last_spilled_reg;
last_spilled_gp_reg = source.last_spilled_gp_reg;
last_spilled_fp_reg = source.last_spilled_fp_reg;
}
void LiftoffAssembler::CacheState::Steal(CacheState& source) {
......@@ -250,6 +259,8 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
*this = source;
}
// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
: TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
......@@ -259,35 +270,38 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
Register LiftoffAssembler::GetBinaryOpTargetRegister(
RegClass rc, PinnedRegisterScope pinned) {
LiftoffRegister LiftoffAssembler::GetBinaryOpTargetRegister(
RegClass rc, LiftoffRegList pinned) {
auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
if (slot_lhs.is_reg() && GetNumUses(slot_lhs.reg()) == 1) {
DCHECK_EQ(rc, slot_lhs.reg().reg_class());
return slot_lhs.reg();
}
auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
if (slot_rhs.is_reg() && GetNumUses(slot_rhs.reg()) == 1) {
DCHECK_EQ(rc, slot_rhs.reg().reg_class());
return slot_rhs.reg();
}
return GetUnusedRegister(rc, pinned);
}
Register LiftoffAssembler::PopToRegister(RegClass rc,
PinnedRegisterScope pinned) {
LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
LiftoffRegList pinned) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
switch (slot.loc()) {
case VarState::kStack: {
Register reg = GetUnusedRegister(rc, pinned);
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
Fill(reg, cache_state_.stack_height());
return reg;
}
case VarState::kRegister:
DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
case VarState::kConstant: {
Register reg = GetUnusedRegister(rc, pinned);
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
LoadConstant(reg, WasmValue(slot.i32_const()));
return reg;
}
......@@ -346,24 +360,21 @@ void LiftoffAssembler::SpillLocals() {
}
}
Register LiftoffAssembler::SpillOneRegister(RegClass rc,
PinnedRegisterScope pinned_regs) {
DCHECK_EQ(kGpReg, rc);
LiftoffRegister LiftoffAssembler::SpillOneRegister(RegClass rc,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
Register spill_reg = cache_state_.GetNextSpillReg(pinned_regs);
int remaining_uses = cache_state_.register_use_count[spill_reg.code()];
LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(rc, pinned);
int remaining_uses = cache_state_.get_use_count(spill_reg);
DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto& slot = cache_state_.stack_state[idx];
if (!slot.is_reg() || slot.reg() != spill_reg) continue;
auto* slot = &cache_state_.stack_state[idx];
if (!slot->is_reg() || slot->reg() != spill_reg) continue;
Spill(idx, spill_reg);
slot.MakeStack();
slot->MakeStack();
if (--remaining_uses == 0) break;
}
cache_state_.register_use_count[spill_reg.code()] = 0;
cache_state_.used_registers &= ~spill_reg.bit();
cache_state_.clear_used(spill_reg);
return spill_reg;
}
......
......@@ -9,34 +9,16 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
// Include platform specific definitions.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h"
#else
#error Unsupported architecture.
#endif
namespace v8 {
namespace internal {
namespace wasm {
......@@ -44,49 +26,21 @@ namespace wasm {
// Forward declarations.
struct ModuleEnv;
enum RegClass { kNoReg, kGpReg, kFpReg };
// TODO(clemensh): Switch to a switch once we require C++14 support.
static constexpr RegClass reg_class_for(ValueType type) {
return type == kWasmI32 || type == kWasmI64 // int types
? kGpReg
: type == kWasmF32 || type == kWasmF64 // float types
? kFpReg
: kNoReg; // other (unsupported) types
}
class LiftoffAssembler : public TurboAssembler {
public:
// TODO(clemensh): Remove this limitation by allocating more stack space if
// needed.
static constexpr int kMaxValueStackHeight = 8;
class PinnedRegisterScope {
public:
PinnedRegisterScope() : pinned_regs_(0) {}
explicit PinnedRegisterScope(RegList regs) : pinned_regs_(regs) {}
Register pin(Register reg) {
pinned_regs_ |= reg.bit();
return reg;
}
RegList pinned_regs() const { return pinned_regs_; }
bool has(Register reg) const { return (pinned_regs_ & reg.bit()) != 0; }
private:
RegList pinned_regs_ = 0;
};
static_assert(IS_TRIVIALLY_COPYABLE(PinnedRegisterScope),
"PinnedRegisterScope can be passed by value");
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kConstant };
explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
explicit VarState(ValueType type, Register r)
: loc_(kRegister), type_(type), reg_(r) {}
explicit VarState(ValueType type, LiftoffRegister r)
: loc_(kRegister), type_(type), reg_(r) {
DCHECK_EQ(r.reg_class(), reg_class_for(type));
}
explicit VarState(ValueType type, uint32_t i32_const)
: loc_(kConstant), type_(type), i32_const_(i32_const) {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
......@@ -106,6 +60,8 @@ class LiftoffAssembler : public TurboAssembler {
}
bool is_stack() const { return loc_ == kStack; }
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
bool is_reg() const { return loc_ == kRegister; }
bool is_const() const { return loc_ == kConstant; }
......@@ -117,11 +73,13 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK_EQ(loc_, kConstant);
return i32_const_;
}
Register reg() const {
Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); }
LiftoffRegister reg() const {
DCHECK_EQ(loc_, kRegister);
return reg_;
}
RegClass reg_class() const { return reg().reg_class(); }
void MakeStack() { loc_ = kStack; }
......@@ -132,10 +90,11 @@ class LiftoffAssembler : public TurboAssembler {
ValueType type_;
union {
Register reg_; // used if loc_ == kRegister
LiftoffRegister reg_; // used if loc_ == kRegister
uint32_t i32_const_; // used if loc_ == kConstant
};
};
static_assert(IS_TRIVIALLY_COPYABLE(VarState),
"VarState should be trivially copyable");
......@@ -147,80 +106,89 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Improve memory management here; avoid std::vector.
std::vector<VarState> stack_state;
RegList used_registers = 0;
// TODO(clemensh): Replace this by CountLeadingZeros(kGpCacheRegs) once that
// method is constexpr.
static constexpr int kMaxRegisterCode = 7;
uint32_t register_use_count[kMaxRegisterCode + 1] = {0};
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegister last_spilled_gp_reg = kGpCacheRegList.GetFirstRegSet();
LiftoffRegister last_spilled_fp_reg = kFpCacheRegList.GetFirstRegSet();
// TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
uint32_t stack_base = 0;
Register last_spilled_reg = Register::from_code<0>();
// InitMerge: Initialize this CacheState from the {source} cache state, but
// make sure that other code paths can still jump here (i.e. avoid constants
// in the locals or the merge region as specified by {arity}).
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
uint32_t arity);
void Steal(CacheState& source);
void Split(const CacheState& source);
bool has_unused_register(PinnedRegisterScope pinned_scope = {}) const {
RegList available_regs =
kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
return available_regs != 0;
bool has_unused_register(RegClass rc,
LiftoffRegList pinned_scope = {}) const {
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList cache_regs = GetCacheRegList(rc);
LiftoffRegList available_regs =
cache_regs & ~used_registers & ~pinned_scope;
return !available_regs.is_empty();
}
Register unused_register(PinnedRegisterScope pinned_scope = {}) const {
RegList available_regs =
kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
Register reg =
Register::from_code(base::bits::CountTrailingZeros(available_regs));
DCHECK_EQ(0, used_registers & reg.bit());
return reg;
LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned_scope = {}) const {
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList cache_regs = GetCacheRegList(rc);
LiftoffRegList available_regs =
cache_regs & ~used_registers & ~pinned_scope;
return available_regs.GetFirstRegSet();
}
void inc_used(Register reg) {
used_registers |= reg.bit();
DCHECK_GE(kMaxRegisterCode, reg.code());
++register_use_count[reg.code()];
void inc_used(LiftoffRegister reg) {
used_registers.set(reg);
DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
++register_use_count[reg.liftoff_code()];
}
// Returns whether this was the last use.
bool dec_used(Register reg) {
bool dec_used(LiftoffRegister reg) {
DCHECK(is_used(reg));
DCHECK_GE(kMaxRegisterCode, reg.code());
if (--register_use_count[reg.code()] == 0) {
used_registers &= ~reg.bit();
int code = reg.liftoff_code();
DCHECK_LT(0, register_use_count[code]);
if (--register_use_count[code] != 0) return false;
used_registers.clear(reg);
return true;
}
return false;
}
bool is_used(Register reg) const {
DCHECK_GE(kMaxRegisterCode, reg.code());
bool used = used_registers & reg.bit();
DCHECK_EQ(used, register_use_count[reg.code()] != 0);
bool is_used(LiftoffRegister reg) const {
bool used = used_registers.has(reg);
DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
return used;
}
bool is_free(Register reg) const { return !is_used(reg); }
uint32_t get_use_count(LiftoffRegister reg) const {
DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
return register_use_count[reg.liftoff_code()];
}
uint32_t stack_height() const {
return static_cast<uint32_t>(stack_state.size());
void clear_used(LiftoffRegister reg) {
register_use_count[reg.liftoff_code()] = 0;
used_registers.clear(reg);
}
Register GetNextSpillReg(PinnedRegisterScope scope = {}) {
uint32_t mask = (1u << (last_spilled_reg.code() + 1)) - 1;
RegList unpinned_regs = kGpCacheRegs & ~scope.pinned_regs();
DCHECK_NE(0, unpinned_regs);
RegList remaining_regs = unpinned_regs & ~mask;
if (!remaining_regs) remaining_regs = unpinned_regs;
last_spilled_reg =
Register::from_code(base::bits::CountTrailingZeros(remaining_regs));
return last_spilled_reg;
bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
LiftoffRegister GetNextSpillReg(RegClass rc, LiftoffRegList pinned = {}) {
LiftoffRegister* last_spilled_p =
rc == kGpReg ? &last_spilled_gp_reg : &last_spilled_fp_reg;
LiftoffRegList cache_regs = GetCacheRegList(rc);
LiftoffRegList unpinned = cache_regs & ~pinned;
DCHECK(!unpinned.is_empty());
LiftoffRegList remaining_regs =
unpinned.MaskOut((1u << (last_spilled_p->liftoff_code() + 1)) - 1);
if (remaining_regs.is_empty()) remaining_regs = unpinned;
LiftoffRegister reg = remaining_regs.GetFirstRegSet();
*last_spilled_p = reg;
return reg;
}
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
uint32_t arity);
void Steal(CacheState& source);
void Split(const CacheState& source);
uint32_t stack_height() const {
return static_cast<uint32_t>(stack_state.size());
}
private:
......@@ -233,27 +201,25 @@ class LiftoffAssembler : public TurboAssembler {
explicit LiftoffAssembler(Isolate* isolate);
~LiftoffAssembler();
Register GetBinaryOpTargetRegister(RegClass, PinnedRegisterScope = {});
LiftoffRegister GetBinaryOpTargetRegister(RegClass,
LiftoffRegList pinned = {});
Register PopToRegister(RegClass, PinnedRegisterScope = {});
LiftoffRegister PopToRegister(RegClass, LiftoffRegList pinned = {});
void PushRegister(ValueType type, Register reg) {
void PushRegister(ValueType type, LiftoffRegister reg) {
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg);
}
uint32_t GetNumUses(Register reg) const {
DCHECK_GE(CacheState::kMaxRegisterCode, reg.code());
return cache_state_.register_use_count[reg.code()];
uint32_t GetNumUses(LiftoffRegister reg) {
return cache_state_.get_use_count(reg);
}
Register GetUnusedRegister(RegClass rc,
PinnedRegisterScope pinned_regs = {}) {
DCHECK_EQ(kGpReg, rc);
if (cache_state_.has_unused_register(pinned_regs)) {
return cache_state_.unused_register(pinned_regs);
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
if (cache_state_.has_unused_register(rc, pinned)) {
return cache_state_.unused_register(rc, pinned);
}
return SpillOneRegister(rc, pinned_regs);
return SpillOneRegister(rc, pinned);
}
void DropStackSlot(VarState* slot) {
......@@ -278,21 +244,23 @@ class LiftoffAssembler : public TurboAssembler {
inline void ReserveStackSpace(uint32_t);
inline void LoadConstant(Register, WasmValue);
inline void LoadConstant(LiftoffRegister, WasmValue);
inline void LoadFromContext(Register dst, uint32_t offset, int size);
inline void SpillContext(Register context);
inline void Load(Register dst, Register src_addr, uint32_t offset_imm,
int size, PinnedRegisterScope = {});
inline void Store(Register dst_addr, uint32_t offset_imm, Register src,
int size, PinnedRegisterScope = {});
inline void LoadCallerFrameSlot(Register, uint32_t caller_slot_idx);
inline void Load(LiftoffRegister dst, Register src_addr, uint32_t offset_imm,
int size, LiftoffRegList = {});
inline void Store(Register dst_addr, uint32_t offset_imm, LiftoffRegister src,
int size, LiftoffRegList = {});
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index);
inline void MoveToReturnRegister(Register);
inline void MoveToReturnRegister(LiftoffRegister);
// TODO(clemensh): Pass the type to {Move}, to emit more efficient code.
inline void Move(LiftoffRegister dst, LiftoffRegister src);
inline void Spill(uint32_t index, Register);
inline void Spill(uint32_t index, LiftoffRegister);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(Register, uint32_t index);
inline void Fill(LiftoffRegister, uint32_t index);
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
......@@ -303,9 +271,6 @@ class LiftoffAssembler : public TurboAssembler {
inline void JumpIfZero(Register, Label*);
// Platform-specific constant.
static constexpr RegList kGpCacheRegs = kLiftoffAssemblerGpCacheRegs;
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
......@@ -332,10 +297,6 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
private:
static_assert(
base::bits::CountPopulation(kGpCacheRegs) >= 2,
"We need at least two cache registers to execute binary operations");
uint32_t num_locals_ = 0;
uint32_t stack_space_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
......@@ -347,7 +308,7 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
Register SpillOneRegister(RegClass, PinnedRegisterScope = {});
LiftoffRegister SpillOneRegister(RegClass rc, LiftoffRegList pinned);
};
} // namespace wasm
......
......@@ -112,6 +112,33 @@ class LiftoffCompiler {
}
}
void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
DCHECK_EQ(kWasmI32, __ local_type(param_idx));
compiler::LinkageLocation param_loc =
call_desc_->GetInputLocation(input_location);
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
int reg_code = param_loc.AsRegister();
LiftoffRegister reg = LiftoffRegister(Register::from_code(reg_code));
if (kGpCacheRegList.has(reg)) {
// This is a cache register, just use it.
__ PushRegister(kWasmI32, reg);
return;
}
// No cache register. Push to the stack.
__ Spill(param_idx, reg);
__ cache_state()->stack_state.emplace_back(kWasmI32);
return;
}
if (param_loc.IsCallerFrameSlot()) {
LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg);
__ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
__ PushRegister(kWasmI32, tmp_reg);
return;
}
UNREACHABLE();
}
void StartFunctionBody(Decoder* decoder, Control* block) {
if (!kLiftoffAssemblerImplementedOnThisPlatform) {
unsupported(decoder, "platform");
......@@ -140,28 +167,8 @@ class LiftoffCompiler {
__ SpillContext(context_reg);
uint32_t param_idx = 0;
for (; param_idx < num_params; ++param_idx) {
constexpr uint32_t kFirstActualParamIndex = kContextParameterIndex + 1;
ValueType type = __ local_type(param_idx);
compiler::LinkageLocation param_loc =
call_desc_->GetInputLocation(param_idx + kFirstActualParamIndex);
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
Register param_reg = Register::from_code(param_loc.AsRegister());
if (param_reg.bit() & __ kGpCacheRegs) {
// This is a cache register, just use it.
__ PushRegister(type, param_reg);
} else {
// No cache register. Push to the stack.
__ Spill(param_idx, param_reg);
__ cache_state()->stack_state.emplace_back(type);
}
} else if (param_loc.IsCallerFrameSlot()) {
Register tmp_reg = __ GetUnusedRegister(reg_class_for(type));
__ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
__ PushRegister(type, tmp_reg);
} else {
UNIMPLEMENTED();
}
constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
}
for (; param_idx < __ num_locals(); ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
......@@ -258,11 +265,12 @@ class LiftoffCompiler {
}
#undef CASE_EMIT_FN
LiftoffAssembler::PinnedRegisterScope pinned_regs;
Register target_reg = pinned_regs.pin(__ GetBinaryOpTargetRegister(kGpReg));
Register rhs_reg = pinned_regs.pin(__ PopToRegister(kGpReg, pinned_regs));
Register lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
(asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
LiftoffRegList pinned;
LiftoffRegister target_reg =
pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
(asm_->*emit_fn)(target_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
__ PushRegister(kWasmI32, target_reg);
}
......@@ -298,7 +306,7 @@ class LiftoffCompiler {
// TODO(clemensh): Handle other types.
if (values[0].type != kWasmI32)
return unsupported(decoder, "non-i32 return");
Register reg = __ PopToRegister(kGpReg);
LiftoffRegister reg = __ PopToRegister(kGpReg);
__ MoveToReturnRegister(reg);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
......@@ -308,9 +316,10 @@ class LiftoffCompiler {
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
auto& slot = __ cache_state()->stack_state[operand.index];
DCHECK_EQ(slot.type(), operand.type);
switch (slot.loc()) {
case kRegister:
__ PushRegister(operand.type, slot.reg());
__ PushRegister(slot.type(), slot.reg());
break;
case kConstant:
__ cache_state()->stack_state.emplace_back(operand.type,
......@@ -318,14 +327,35 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(operand.type);
Register reg = __ GetUnusedRegister(rc);
LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, operand.index);
__ PushRegister(operand.type, reg);
} break;
__ PushRegister(slot.type(), reg);
break;
}
}
CheckStackSizeLimit(decoder);
}
void SetLocalFromStackSlot(LiftoffAssembler::VarState& dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
if (dst_slot.is_reg()) {
LiftoffRegister slot_reg = dst_slot.reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot.reg(), state.stack_height() - 1);
return;
}
state.dec_used(slot_reg);
}
ValueType type = dst_slot.type();
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1);
dst_slot = LiftoffAssembler::VarState(type, dst_reg);
__ cache_state()->inc_used(dst_reg);
}
void SetLocal(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
......@@ -340,28 +370,10 @@ class LiftoffCompiler {
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
case kStack: {
switch (target_slot.loc()) {
case kRegister:
if (state.register_use_count[target_slot.reg().code()] == 1) {
__ Fill(target_slot.reg(), state.stack_height() - 1);
break;
} else {
state.dec_used(target_slot.reg());
// and fall through to use a new register.
}
case kConstant:
case kStack: {
ValueType type = __ local_type(local_index);
Register target_reg = __ GetUnusedRegister(reg_class_for(type));
__ Fill(target_reg, state.stack_height() - 1);
target_slot = LiftoffAssembler::VarState(type, target_reg);
state.inc_used(target_reg);
} break;
}
case kStack:
SetLocalFromStackSlot(target_slot, local_index);
break;
}
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
......@@ -380,29 +392,30 @@ class LiftoffCompiler {
const auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32 && global->type != kWasmI64)
return unsupported(decoder, "non-int global");
LiftoffAssembler::PinnedRegisterScope pinned;
Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
LiftoffRegList pinned;
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
Register value =
pinned.pin(__ GetUnusedRegister(reg_class_for(global->type), pinned));
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
int size = 1 << ElementSizeLog2Of(global->type);
if (size > kPointerSize)
return unsupported(decoder, "global > kPointerSize");
__ Load(value, addr, global->offset, size, pinned);
__ PushRegister(global->type, value);
CheckStackSizeLimit(decoder);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
auto* global = &env_->module->globals[operand.index];
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
LiftoffAssembler::PinnedRegisterScope pinned;
Register addr = pinned.pin(__ GetUnusedRegister(kGpReg));
LiftoffRegList pinned;
Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, globals_start),
kPointerSize);
Register reg =
pinned.pin(__ PopToRegister(reg_class_for(global->type), pinned));
LiftoffRegister reg =
pinned.set(__ PopToRegister(reg_class_for(global->type), pinned));
int size = 1 << ElementSizeLog2Of(global->type);
__ Store(addr, global->offset, reg, size, pinned);
}
......@@ -425,7 +438,7 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kGpReg);
Register value = __ PopToRegister(kGpReg).gp();
__ JumpIfZero(value, &cont_false);
Br(decoder, target);
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
#define V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
#include <memory>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/base/bits.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
enum RegClass { kNoReg, kGpReg, kFpReg };
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
return type == kWasmI32 || type == kWasmI64 // int types
? kGpReg
: type == kWasmF32 || type == kWasmF64 // float types
? kFpReg
: kNoReg; // other (unsupported) types
}
// Maximum code of a gp cache register.
static constexpr int kMaxGpRegCode =
8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
base::bits::CountLeadingZeros(kLiftoffAssemblerGpCacheRegs);
// Maximum code of an fp cache register.
static constexpr int kMaxFpRegCode =
8 * sizeof(kLiftoffAssemblerFpCacheRegs) -
base::bits::CountLeadingZeros(kLiftoffAssemblerFpCacheRegs);
// LiftoffRegister encodes both gp and fp in a unified index space.
// [0 .. kMaxGpRegCode] encodes gp registers,
// [kMaxGpRegCode+1 .. kMaxGpRegCode + kMaxFpRegCode] encodes fp registers.
static constexpr int kAfterMaxLiftoffGpRegCode = kMaxGpRegCode + 1;
static constexpr int kAfterMaxLiftoffFpRegCode =
kAfterMaxLiftoffGpRegCode + kMaxFpRegCode + 1;
static constexpr int kAfterMaxLiftoffRegCode = kAfterMaxLiftoffFpRegCode;
static_assert(kAfterMaxLiftoffRegCode < 256,
"liftoff register codes can be stored in one uint8_t");
class LiftoffRegister {
public:
explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
DCHECK_EQ(reg, gp());
}
explicit LiftoffRegister(DoubleRegister reg)
: LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
DCHECK_EQ(reg, fp());
}
static LiftoffRegister from_liftoff_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kAfterMaxLiftoffRegCode, code);
return LiftoffRegister(code);
}
constexpr bool is_gp() const { return code_ < kAfterMaxLiftoffGpRegCode; }
constexpr bool is_fp() const {
return code_ >= kAfterMaxLiftoffGpRegCode &&
code_ < kAfterMaxLiftoffFpRegCode;
}
Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
}
DoubleRegister fp() const {
DCHECK(is_fp());
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
int liftoff_code() const { return code_; }
RegClass reg_class() const {
DCHECK(is_gp() || is_fp());
return is_gp() ? kGpReg : kFpReg;
}
bool operator==(const LiftoffRegister other) const {
return code_ == other.code_;
}
bool operator!=(const LiftoffRegister other) const {
return code_ != other.code_;
}
private:
uint8_t code_;
explicit constexpr LiftoffRegister(uint8_t code) : code_(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
"LiftoffRegister can efficiently be passed by value");
class LiftoffRegList {
public:
static constexpr bool use_u16 = kAfterMaxLiftoffRegCode <= 16;
static constexpr bool use_u32 = !use_u16 && kAfterMaxLiftoffRegCode <= 32;
using storage_t = std::conditional<
use_u16, uint16_t,
std::conditional<use_u32, uint32_t, uint64_t>::type>::type;
static constexpr storage_t kGpMask = storage_t{kLiftoffAssemblerGpCacheRegs};
static constexpr storage_t kFpMask = storage_t{kLiftoffAssemblerFpCacheRegs}
<< kAfterMaxLiftoffGpRegCode;
constexpr LiftoffRegList() = default;
Register set(Register reg) { return set(LiftoffRegister(reg)).gp(); }
DoubleRegister set(DoubleRegister reg) {
return set(LiftoffRegister(reg)).fp();
}
LiftoffRegister set(LiftoffRegister reg) {
regs_ |= storage_t{1} << reg.liftoff_code();
return reg;
}
LiftoffRegister clear(LiftoffRegister reg) {
regs_ &= ~(storage_t{1} << reg.liftoff_code());
return reg;
}
bool has(LiftoffRegister reg) const {
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
bool is_empty() const { return regs_ == 0; }
LiftoffRegList operator&(LiftoffRegList other) const {
return FromBits(regs_ & other.regs_);
}
LiftoffRegList operator~() const {
return FromBits(~regs_ & (kGpMask | kFpMask));
}
LiftoffRegister GetFirstRegSet() const {
DCHECK_NE(0, regs_);
unsigned first_code = base::bits::CountTrailingZeros(regs_);
return LiftoffRegister::from_liftoff_code(first_code);
}
LiftoffRegList MaskOut(storage_t mask) const {
// Masking out is guaranteed to return a correct reg list, hence no checks
// needed.
return FromBits(regs_ & ~mask);
}
static LiftoffRegList FromBits(storage_t bits) {
DCHECK_EQ(bits, bits & (kGpMask | kFpMask));
return LiftoffRegList(bits);
}
template <storage_t bits>
static constexpr LiftoffRegList FromBits() {
static_assert(bits == (bits & (kGpMask | kFpMask)), "illegal reg list");
return LiftoffRegList(bits);
}
private:
storage_t regs_ = 0;
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegList),
"LiftoffRegList can be passed by value");
static constexpr LiftoffRegList kGpCacheRegList =
LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
static constexpr LiftoffRegList kFpCacheRegList =
LiftoffRegList::FromBits<LiftoffRegList::kFpMask>();
static constexpr LiftoffRegList GetCacheRegList(RegClass rc) {
return rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_REGISTER_H_
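
Taken together, these types support the allocation query used by
CacheState in liftoff-assembler.h above. A condensed sketch of that
query, built only from the API in this header:

// Sketch: pick a free gp cache register given the used and pinned
// sets. Callers must guarantee one is available; otherwise Liftoff
// frees one via SpillOneRegister.
LiftoffRegister TakeFreeGpReg(LiftoffRegList used, LiftoffRegList pinned) {
  LiftoffRegList available = kGpCacheRegList & ~used & ~pinned;
  return available.GetFirstRegSet();
}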
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_S390_DEFS_H_
......@@ -13,33 +13,35 @@ namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {}
void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
int size) {}
void LiftoffAssembler::SpillContext(Register context) {}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {}
LiftoffRegList pinned) {}
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {}
LiftoffRegister src, int size,
LiftoffRegList pinned) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs =
Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
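For x64 the list is real: Register::ListOf ORs together 1 << code for each named register, and rsp (code 4) and rbp (code 5) are deliberately absent since they serve as stack and frame pointer. A hand-written equivalent, assuming the standard x64 register codes rax=0, rcx=1, rdx=2, rbx=3, rsi=6, rdi=7 (illustration only, not part of the patch):
// Sketch: reconstructing the cache set by hand from register codes.
constexpr RegList kGpCacheRegsByHand =
    (1u << 0) | (1u << 1) | (1u << 2) |  // rax, rcx, rdx
    (1u << 3) | (1u << 6) | (1u << 7);   // rbx, rsi, rdi
static_assert(kGpCacheRegsByHand == kLiftoffAssemblerGpCacheRegs,
              "same register set");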
......@@ -34,17 +34,17 @@ void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
subl(rsp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xorl(reg, reg);
xorl(reg.gp(), reg.gp());
} else {
movl(reg, Immediate(value.to_i32()));
movl(reg.gp(), Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
UNREACHABLE();
}
}
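The xor-with-itself form used for the zero case is the standard x64 zeroing idiom: it encodes shorter than moving an immediate and breaks any dependency on the register's previous value. The straightforward alternative, shown for comparison only:
// Equivalent but larger encoding than xorl(reg.gp(), reg.gp()).
movl(reg.gp(), Immediate(0));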
......@@ -64,54 +64,55 @@ void LiftoffAssembler::SpillContext(Register context) {
movp(liftoff::GetContextOperand(), context);
}
void LiftoffAssembler::Load(Register dst, Register src_addr,
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
uint32_t offset_imm, int size,
PinnedRegisterScope pinned) {
LiftoffRegList pinned) {
Operand src_op = Operand(src_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate cannot be encoded in the operand. Load it to a register
// first.
Register src = GetUnusedRegister(kGpReg, pinned);
Register src = GetUnusedRegister(kGpReg, pinned).gp();
movl(src, Immediate(offset_imm));
src_op = Operand(src_addr, src, times_1, 0);
}
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, src_op);
movl(dst.gp(), src_op);
} else {
movq(dst, src_op);
movq(dst.gp(), src_op);
}
}
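The scratch-register fallback in Load (and in Store below) applies whenever offset_imm does not fit the signed 32-bit displacement field of an x64 memory operand. A quick worked check, values hypothetical:
// A 2 GiB offset exceeds kMaxInt (0x7fffffff), so it cannot be encoded
// as a displacement and is materialized in a register instead, giving
// the base+index form Operand(src_addr, src, times_1, 0).
uint32_t offset_imm = 0x80000000u;
bool needs_scratch = offset_imm > static_cast<uint32_t>(kMaxInt);  // true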
void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
Register src, int size,
PinnedRegisterScope pinned) {
LiftoffRegister src, int size,
LiftoffRegList pinned) {
Operand dst_op = Operand(dst_addr, offset_imm);
if (offset_imm > kMaxInt) {
// The immediate cannot be encoded in the operand. Load it to a register
// first.
Register dst = GetUnusedRegister(kGpReg, pinned);
Register dst = GetUnusedRegister(kGpReg, pinned).gp();
movl(dst, Immediate(offset_imm));
dst_op = Operand(dst_addr, dst, times_1, 0);
}
DCHECK(size == 4 || size == 8);
if (src.is_fp()) UNIMPLEMENTED();
if (size == 4) {
movl(dst_op, src);
movl(dst_op, src.gp());
} else {
movp(dst_op, src);
movp(dst_op, src.gp());
}
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx) {
constexpr int32_t kStackSlotSize = 8;
movl(dst, Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
movl(dst.gp(), Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
DCHECK_NE(dst_index, src_index);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(kGpReg);
if (cache_state_.has_unused_register(kGpReg)) {
LiftoffRegister reg = GetUnusedRegister(kGpReg);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
......@@ -120,14 +121,26 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
// TODO(clemensh): Handle different types here.
if (reg != rax) movl(rax, reg);
void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
if (reg.gp() != rax) movl(rax, reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
movl(liftoff::GetStackSlot(index), reg);
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
// The caller should check that the registers are not equal. For most
// occurrences, this is already guaranteed, so no need to check within this
// method.
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
// TODO(clemensh): Handle different sizes here.
if (dst.is_gp()) {
movq(dst.gp(), src.gp());
} else {
movsd(dst.fp(), src.fp());
}
}
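Because Move asserts rather than checks that the registers differ, call sites are expected to guard the call themselves. A minimal usage sketch, with asm_, dst and src as hypothetical values:
// Hypothetical caller: only emit the move when the registers differ,
// satisfying the DCHECK_NE(dst, src) contract above.
if (dst != src) asm_->Move(dst, src);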
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
movl(liftoff::GetStackSlot(index), reg.gp());
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
......@@ -135,9 +148,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
movl(reg, liftoff::GetStackSlot(index));
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
movl(reg.gp(), liftoff::GetStackSlot(index));
}
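Spill and Fill are inverses over the same stack slot (both currently 32-bit moves, per the movl pair above). A hypothetical round trip, with asm_, slot and r as assumed names:
// Illustration: the value in r survives a Spill/Fill pair through slot.
asm_->Spill(slot, r);  // movl [slot], r.gp()
asm_->Fill(r, slot);   // movl r.gp(), [slot]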
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
......