Commit bca2b694 authored by Vincent Belliard, committed by Commit Bot

[Liftoff] Add LiftoffStackSlots

On AArch64, we can't push arguments one by one because sp must always be 16-byte aligned.
This patch adds a LiftoffStackSlots class which holds everything that has to be pushed.
This way, on AArch64, we will be able to reserve the needed space up front, adding padding if necessary. Then, all pushes are converted to stores into this reserved space.
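For illustration, a minimal standalone sketch of the "reserve, then store" pattern (simplified; not the actual V8 classes, with slots and registers reduced to plain integers):

// Sketch of the pattern used by LiftoffStackSlots::Construct on AArch64:
// claim an even number of 8-byte slots so sp stays 16-byte aligned, then
// store ("poke") each value at its offset instead of pushing one by one.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t kXRegSize = 8;  // one stack slot is 8 bytes on AArch64

int main() {
  std::vector<uint64_t> slots = {1, 2, 3};  // values that would be "pushed"

  // Round the claimed slot count up to an even number, mirroring
  // Claim(RoundUp(slot_count, 2)) in the patch.
  size_t slot_count = slots.size();
  size_t claimed = (slot_count + 1) & ~size_t{1};
  std::vector<uint64_t> stack(claimed, 0);  // stands in for the reserved area

  // Store each value at its offset from the new stack pointer, highest
  // slot first, as Poke(scratch, poke_offset) does in the patch.
  for (size_t i = 0; i < slot_count; ++i) {
    size_t poke_offset = (slot_count - i - 1) * kXRegSize;
    stack[poke_offset / kXRegSize] = slots[i];
  }

  for (size_t i = 0; i < claimed; ++i) {
    std::printf("slot %zu: %llu\n", i,
                static_cast<unsigned long long>(stack[i]));
  }
  return 0;
}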

Bug: v8:6600

Change-Id: I17480fb841f16e07356b35326c59e3c7f03ed012
Reviewed-on: https://chromium-review.googlesource.com/1023977
Commit-Queue: Vincent Belliard <vincent.belliard@arm.com>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52746}
parent f2b5a6da
......@@ -264,17 +264,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
BAILOUT("PushCallerFrameSlot");
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
BAILOUT("PushCallerFrameSlot reg");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
BAILOUT("PushRegisters");
}
......@@ -317,6 +306,10 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
BAILOUT("DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
asm_->BAILOUT("LiftoffStackSlots::Construct");
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -80,6 +80,22 @@ inline CPURegList PadVRegList(RegList list) {
return CPURegList(CPURegister::kVRegister, kDRegSizeInBits, list);
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
ValueType type) {
switch (type) {
case kWasmI32:
return temps->AcquireW();
case kWasmI64:
return temps->AcquireX();
case kWasmF32:
return temps->AcquireS();
case kWasmF64:
return temps->AcquireD();
default:
UNREACHABLE();
}
}
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
......@@ -392,17 +408,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
BAILOUT("PushCallerFrameSlot");
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
BAILOUT("PushCallerFrameSlot reg");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
PushCPURegList(liftoff::PadRegList(regs.GetGpList()));
PushCPURegList(liftoff::PadVRegList(regs.GetFpList()));
......@@ -448,6 +453,37 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
BAILOUT("DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
size_t slot_count = slots_.size();
// The stack pointer is required to be quadword aligned.
asm_->Claim(RoundUp(slot_count, 2));
size_t slot_index = 0;
for (auto& slot : slots_) {
size_t poke_offset = (slot_count - slot_index - 1) * kXRegSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_index_));
asm_->Poke(scratch, poke_offset);
break;
}
case LiftoffAssembler::VarState::kRegister:
asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
poke_offset);
break;
case LiftoffAssembler::VarState::KIntConst: {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.AcquireW();
asm_->Mov(scratch, slot.src_.i32_const());
asm_->Poke(scratch, poke_offset);
break;
}
}
slot_index++;
}
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -1330,39 +1330,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, half);
push(liftoff::GetHalfStackSlot(2 * src_index - 1));
}
push(liftoff::GetHalfStackSlot(2 * src_index -
(half == kLowWord ? 0 : 1)));
break;
case VarState::kRegister:
if (src.type() == kWasmI64) {
PushCallerFrameSlot(
half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
} else {
PushCallerFrameSlot(src.reg(), src.type());
}
break;
case VarState::KIntConst:
// The high word is the sign extension of the low word.
push(Immediate(half == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
break;
}
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {
......@@ -1484,6 +1451,36 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
}
asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ -
(slot.half_ == kLowWord ? 0 : 1)));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
kWasmI32);
} else {
liftoff::push(asm_, src.reg(), src.type());
}
break;
case LiftoffAssembler::VarState::KIntConst:
// The high word is the sign extension of the low word.
asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
break;
}
}
}
#undef REQUIRE_CPU_FEATURE
} // namespace wasm
......
......@@ -452,6 +452,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
slot.MakeStack();
}
LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this);
LiftoffRegList param_regs;
......@@ -500,7 +501,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
} else {
DCHECK(loc.IsCallerFrameSlot());
PushCallerFrameSlot(slot, stack_idx, half);
stack_slots.Add(slot, stack_idx, half);
}
}
}
......@@ -518,11 +519,14 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
kWasmIntPtr);
*target = new_target.gp();
} else {
PushCallerFrameSlot(LiftoffRegister(*target), kWasmIntPtr);
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
LiftoffRegister(*target)));
*target = no_reg;
}
}
// Create all the slots.
stack_slots.Construct();
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
......
......@@ -501,10 +501,6 @@ class LiftoffAssembler : public TurboAssembler {
inline void AssertUnreachable(AbortReason reason);
// Push a value to the stack (will become a caller frame slot).
inline void PushCallerFrameSlot(const VarState& src, uint32_t src_index,
RegPairHalf half);
inline void PushCallerFrameSlot(LiftoffRegister reg, ValueType type);
inline void PushRegisters(LiftoffRegList);
inline void PopRegisters(LiftoffRegList);
......@@ -640,6 +636,37 @@ void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
// platform-dependent part.
// =======================================================================
class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half) {
slots_.emplace_back(src, src_index, half);
}
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
inline void Construct();
private:
struct Slot {
// Allow move construction.
Slot(Slot&&) = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half)
: src_(src), src_index_(src_index), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), src_index_(0), half_(kLowWord) {}
const LiftoffAssembler::VarState src_;
uint32_t src_index_;
RegPairHalf half_;
};
std::vector<Slot> slots_;
LiftoffAssembler* const asm_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -1302,8 +1302,10 @@ class LiftoffCompiler {
LiftoffAssembler::kWasmIntPtr);
} else {
DCHECK(param_loc.IsCallerFrameSlot());
__ PushCallerFrameSlot(LiftoffRegister(args[0]),
LiftoffAssembler::kWasmIntPtr);
LiftoffStackSlots stack_slots(asm_);
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
LiftoffRegister(args[0])));
stack_slots.Construct();
}
// Allocate the codegen zone if not done before.
......
......@@ -1045,44 +1045,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack: {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, half);
lw(at, liftoff::GetHalfStackSlot(2 * src_index - 1));
push(at);
}
lw(at,
liftoff::GetHalfStackSlot(2 * src_index + (half == kLowWord ? 0 : 1)));
push(at);
break;
}
case VarState::kRegister:
if (src.type() == kWasmI64) {
PushCallerFrameSlot(
half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
} else {
PushCallerFrameSlot(src.reg(), src.type());
}
break;
case VarState::KIntConst: {
// The high word is the sign extension of the low word.
li(at,
Operand(half == kLowWord ? src.i32_const() : src.i32_const() >> 31));
push(at);
break;
}
}
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
......@@ -1212,6 +1174,42 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addiu(sp, sp, size);
}
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(at, liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
asm_->push(at);
}
asm_->lw(at,
liftoff::GetHalfStackSlot(2 * slot.src_index_ +
(slot.half_ == kLowWord ? 0 : 1)));
asm_->push(at);
break;
}
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
kWasmI32);
} else {
liftoff::push(asm_, src.reg(), src.type());
}
break;
case LiftoffAssembler::VarState::KIntConst: {
// The high word is the sign extension of the low word.
asm_->li(at, Operand(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31));
asm_->push(at);
break;
}
}
}
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -837,30 +837,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
switch (src.loc()) {
case VarState::kStack:
ld(at, liftoff::GetStackSlot(src_index));
push(at);
break;
case VarState::kRegister:
PushCallerFrameSlot(src.reg(), src.type());
break;
case VarState::KIntConst: {
li(at, Operand(src.i32_const()));
push(at);
break;
}
}
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
......@@ -990,6 +966,26 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
daddiu(sp, sp, size);
}
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
asm_->ld(at, liftoff::GetStackSlot(slot.src_index_));
asm_->push(at);
break;
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
break;
case LiftoffAssembler::VarState::KIntConst: {
asm_->li(at, Operand(src.i32_const()));
asm_->push(at);
break;
}
}
}
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -269,17 +269,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
BAILOUT("PushCallerFrameSlot");
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
BAILOUT("PushCallerFrameSlot reg");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
BAILOUT("PushRegisters");
}
......@@ -322,6 +311,10 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
BAILOUT("DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
asm_->BAILOUT("LiftoffStackSlots::Construct");
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -269,17 +269,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
BAILOUT("AssertUnreachable");
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
BAILOUT("PushCallerFrameSlot");
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
BAILOUT("PushCallerFrameSlot reg");
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
BAILOUT("PushRegisters");
}
......@@ -322,6 +311,10 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
BAILOUT("DeallocateStackSlot");
}
void LiftoffStackSlots::Construct() {
asm_->BAILOUT("LiftoffStackSlots::Construct");
}
} // namespace wasm
} // namespace internal
} // namespace v8
......
......@@ -1154,26 +1154,6 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index, RegPairHalf) {
switch (src.loc()) {
case VarState::kStack:
pushq(liftoff::GetStackSlot(src_index));
break;
case VarState::kRegister:
PushCallerFrameSlot(src.reg(), src.type());
break;
case VarState::KIntConst:
pushq(Immediate(src.i32_const()));
break;
}
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
while (!gp_regs.is_empty()) {
......@@ -1296,6 +1276,23 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addp(rsp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
for (auto& slot : slots_) {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
asm_->pushq(liftoff::GetStackSlot(slot.src_index_));
break;
case LiftoffAssembler::VarState::kRegister:
liftoff::push(asm_, src.reg(), src.type());
break;
case LiftoffAssembler::VarState::KIntConst:
asm_->pushq(Immediate(src.i32_const()));
break;
}
}
}
#undef REQUIRE_CPU_FEATURE
} // namespace wasm
......