Commit 81bb9cc8 authored by Shu-yu Guo, committed by Commit Bot

Revert "Reland "[wasm][liftoff] Respect CallDescriptor linkage""

This reverts commit 36a7cba2.

Reason for revert: On suspicion of making breakpoints fail more reliably: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux/41129/overview

Original change's description:
> Reland "[wasm][liftoff] Respect CallDescriptor linkage"
>
> This is a reland of 94283811
>
> Patchset #2 fixes the Arm bug. A vpush is used to push the slot,
> so subtract kSimd128Size from the stack decrement to get padding.
>
> Original change's description:
> > [wasm][liftoff] Respect CallDescriptor linkage
> >
> > - Adds the actual stack slot location to LiftoffStackSlots::Slot.
> > - Adds SortInPushedOrder method for architectures that push
> >   parameters.
> > - Changes the LiftoffStackSlots::Construct signature to take the
> >   number of parameter slots in total, and changes implementations
> >   to insert padding when slots aren't contiguous.
> > - Changes Arm MacroAssembler::AllocateStackSpace to check the
> >   immediate value, and to be a nop when it's zero.
> >
> > Bug: v8:9198
> > Change-Id: Ibd5775dbed3a40051fa9e345556231a1c07cf4e9
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2717120
> > Reviewed-by: Andreas Haas <ahaas@chromium.org>
> > Reviewed-by: Clemens Backes <clemensb@chromium.org>
> > Commit-Queue: Bill Budge <bbudge@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#73191}
>
> Bug: v8:9198
> Change-Id: Iae4930e28dd7fc634e3709a5726379c6b37e5195
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2735984
> Reviewed-by: Bill Budge <bbudge@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Commit-Queue: Bill Budge <bbudge@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73195}

Bug: v8:9198
Change-Id: I5e72a1f765eb49ec72198abd44798a6153e3dace
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2737796
Auto-Submit: Shu-yu Guo <syg@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#73202}
parent e73c7b21
...@@ -1389,7 +1389,6 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { ...@@ -1389,7 +1389,6 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
} }
void TurboAssembler::AllocateStackSpace(int bytes) { void TurboAssembler::AllocateStackSpace(int bytes) {
DCHECK_GE(bytes, 0);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
DwVfpRegister scratch = no_dreg; DwVfpRegister scratch = no_dreg;
while (bytes > kStackPageSize) { while (bytes > kStackPageSize) {
...@@ -1400,7 +1399,6 @@ void TurboAssembler::AllocateStackSpace(int bytes) { ...@@ -1400,7 +1399,6 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
vldr(scratch, MemOperand(sp)); vldr(scratch, MemOperand(sp));
bytes -= kStackPageSize; bytes -= kStackPageSize;
} }
if (bytes == 0) return;
sub(sp, sp, Operand(bytes)); sub(sp, sp, Operand(bytes));
} }
#endif #endif
......
...@@ -64,11 +64,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ...@@ -64,11 +64,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void AllocateStackSpace(int bytes); void AllocateStackSpace(int bytes);
#else #else
void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); } void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
void AllocateStackSpace(int bytes) { void AllocateStackSpace(int bytes) { sub(sp, sp, Operand(bytes)); }
DCHECK_GE(bytes, 0);
if (bytes == 0) return;
sub(sp, sp, Operand(bytes));
}
#endif #endif
// Push a fixed frame, consisting of lr, fp // Push a fixed frame, consisting of lr, fp
......
...@@ -1771,7 +1771,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1771,7 +1771,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Slot-sized arguments are never padded but there may be a gap if // Slot-sized arguments are never padded but there may be a gap if
// the slot allocator reclaimed other padding slots. Adjust the stack // the slot allocator reclaimed other padding slots. Adjust the stack
// here to skip any gap. // here to skip any gap.
__ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize); if (slots > pushed_slots) {
__ AllocateStackSpace((slots - pushed_slots) * kSystemPointerSize);
}
switch (rep) { switch (rep) {
case MachineRepresentation::kFloat32: case MachineRepresentation::kFloat32:
__ vpush(i.InputFloatRegister(1)); __ vpush(i.InputFloatRegister(1));
...@@ -3864,8 +3866,10 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3864,8 +3866,10 @@ void CodeGenerator::AssembleConstructFrame() {
} }
const int returns = frame()->GetReturnSlotCount(); const int returns = frame()->GetReturnSlotCount();
// Create space for returns. if (returns != 0) {
__ AllocateStackSpace(returns * kSystemPointerSize); // Create space for returns.
__ AllocateStackSpace(returns * kSystemPointerSize);
}
} }
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
......
...@@ -4232,15 +4232,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ...@@ -4232,15 +4232,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size)); add(sp, sp, Operand(size));
} }
void LiftoffStackSlots::Construct(int param_slots) { void LiftoffStackSlots::Construct() {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
int last_stack_slot = param_slots;
for (auto& slot : slots_) { for (auto& slot : slots_) {
const int stack_slot = slot.dst_slot_;
int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
DCHECK_LT(0, stack_decrement);
last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_; const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) { switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: { case LiftoffAssembler::VarState::kStack: {
...@@ -4252,7 +4245,6 @@ void LiftoffStackSlots::Construct(int param_slots) { ...@@ -4252,7 +4245,6 @@ void LiftoffStackSlots::Construct(int param_slots) {
case kF32: case kF32:
case kRef: case kRef:
case kOptRef: { case kOptRef: {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
asm_->ldr(scratch, asm_->ldr(scratch,
...@@ -4260,14 +4252,12 @@ void LiftoffStackSlots::Construct(int param_slots) { ...@@ -4260,14 +4252,12 @@ void LiftoffStackSlots::Construct(int param_slots) {
asm_->Push(scratch); asm_->Push(scratch);
} break; } break;
case kF64: { case kF64: {
asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD(); DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_)); asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch); asm_->vpush(scratch);
} break; } break;
case kS128: { case kS128: {
asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_); MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_);
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
Register addr = liftoff::CalculateActualAddress( Register addr = liftoff::CalculateActualAddress(
...@@ -4282,9 +4272,7 @@ void LiftoffStackSlots::Construct(int param_slots) { ...@@ -4282,9 +4272,7 @@ void LiftoffStackSlots::Construct(int param_slots) {
} }
break; break;
} }
case LiftoffAssembler::VarState::kRegister: { case LiftoffAssembler::VarState::kRegister:
int pushed_bytes = SlotSizeInBytes(slot);
asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
switch (src.kind()) { switch (src.kind()) {
case kI64: { case kI64: {
LiftoffRegister reg = LiftoffRegister reg =
...@@ -4309,9 +4297,7 @@ void LiftoffStackSlots::Construct(int param_slots) { ...@@ -4309,9 +4297,7 @@ void LiftoffStackSlots::Construct(int param_slots) {
UNREACHABLE(); UNREACHABLE();
} }
break; break;
}
case LiftoffAssembler::VarState::kIntConst: { case LiftoffAssembler::VarState::kIntConst: {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
DCHECK(src.kind() == kI32 || src.kind() == kI64); DCHECK(src.kind() == kI32 || src.kind() == kI64);
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
......
...@@ -3209,12 +3209,16 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ...@@ -3209,12 +3209,16 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1); Drop(size, 1);
} }
void LiftoffStackSlots::Construct(int param_slots) { void LiftoffStackSlots::Construct() {
DCHECK_LT(0, slots_.size()); size_t num_slots = 0;
for (auto& slot : slots_) {
num_slots += slot.src_.kind() == kS128 ? 2 : 1;
}
// The stack pointer is required to be quadword aligned. // The stack pointer is required to be quadword aligned.
asm_->Claim(RoundUp(param_slots, 2)); asm_->Claim(RoundUp(num_slots, 2));
size_t poke_offset = num_slots * kXRegSize;
for (auto& slot : slots_) { for (auto& slot : slots_) {
int poke_offset = slot.dst_slot_ * kSystemPointerSize; poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
switch (slot.src_.loc()) { switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: { case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
......
...@@ -118,30 +118,27 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, ...@@ -118,30 +118,27 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
} }
} }
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind, inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
int padding = 0) {
switch (kind) { switch (kind) {
case kI32: case kI32:
case kRef: case kRef:
case kOptRef: case kOptRef:
assm->AllocateStackSpace(padding);
assm->push(reg.gp()); assm->push(reg.gp());
break; break;
case kI64: case kI64:
assm->AllocateStackSpace(padding);
assm->push(reg.high_gp()); assm->push(reg.high_gp());
assm->push(reg.low_gp()); assm->push(reg.low_gp());
break; break;
case kF32: case kF32:
assm->AllocateStackSpace(sizeof(float) + padding); assm->AllocateStackSpace(sizeof(float));
assm->movss(Operand(esp, 0), reg.fp()); assm->movss(Operand(esp, 0), reg.fp());
break; break;
case kF64: case kF64:
assm->AllocateStackSpace(sizeof(double) + padding); assm->AllocateStackSpace(sizeof(double));
assm->movsd(Operand(esp, 0), reg.fp()); assm->movsd(Operand(esp, 0), reg.fp());
break; break;
case kS128: case kS128:
assm->AllocateStackSpace(sizeof(double) * 2 + padding); assm->AllocateStackSpace(sizeof(double) * 2);
assm->movdqu(Operand(esp, 0), reg.fp()); assm->movdqu(Operand(esp, 0), reg.fp());
break; break;
default: default:
...@@ -4908,49 +4905,36 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ...@@ -4908,49 +4905,36 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size)); add(esp, Immediate(size));
} }
void LiftoffStackSlots::Construct(int param_slots) { void LiftoffStackSlots::Construct() {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
int last_stack_slot = param_slots;
for (auto& slot : slots_) { for (auto& slot : slots_) {
const int stack_slot = slot.dst_slot_;
int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
DCHECK_LT(0, stack_decrement);
last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_; const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) { switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: case LiftoffAssembler::VarState::kStack:
// The combination of AllocateStackSpace and 2 movdqu is usually smaller // The combination of AllocateStackSpace and 2 movdqu is usually smaller
// in code size than doing 4 pushes. // in code size than doing 4 pushes.
if (src.kind() == kS128) { if (src.kind() == kS128) {
asm_->AllocateStackSpace(stack_decrement); asm_->AllocateStackSpace(sizeof(double) * 2);
asm_->movdqu(liftoff::kScratchDoubleReg, asm_->movdqu(liftoff::kScratchDoubleReg,
liftoff::GetStackSlot(slot.src_offset_)); liftoff::GetStackSlot(slot.src_offset_));
asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg); asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
break; break;
} }
if (src.kind() == kF64) { if (src.kind() == kF64) {
asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
DCHECK_EQ(kLowWord, slot.half_); DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord)); asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
stack_decrement = kSystemPointerSize;
} }
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_)); asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break; break;
case LiftoffAssembler::VarState::kRegister: case LiftoffAssembler::VarState::kRegister:
if (src.kind() == kI64) { if (src.kind() == kI64) {
liftoff::push( liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(), asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
kI32, stack_decrement - kSystemPointerSize); kI32);
} else { } else {
int pushed_bytes = SlotSizeInBytes(slot); liftoff::push(asm_, src.reg(), src.kind());
liftoff::push(asm_, src.reg(), src.kind(),
stack_decrement - pushed_bytes);
} }
break; break;
case LiftoffAssembler::VarState::kIntConst: case LiftoffAssembler::VarState::kIntConst:
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// The high word is the sign extension of the low word. // The high word is the sign extension of the low word.
asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const() asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const()
: src.i32_const() >> 31)); : src.i32_const() >> 31));
......
...@@ -799,9 +799,8 @@ void PrepareStackTransfers(const ValueKindSig* sig, ...@@ -799,9 +799,8 @@ void PrepareStackTransfers(const ValueKindSig* sig,
LiftoffStackSlots* stack_slots, LiftoffStackSlots* stack_slots,
StackTransferRecipe* stack_transfers, StackTransferRecipe* stack_transfers,
LiftoffRegList* param_regs) { LiftoffRegList* param_regs) {
// Process parameters backwards, to reduce the amount of Slot sorting for // Process parameters backwards, such that pushes of caller frame slots are
// the most common case - a normal Wasm Call. Slots will be mostly unsorted // in the correct order.
// in the Builtin call case.
uint32_t call_desc_input_idx = uint32_t call_desc_input_idx =
static_cast<uint32_t>(call_descriptor->InputCount()); static_cast<uint32_t>(call_descriptor->InputCount());
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count()); uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
...@@ -835,8 +834,7 @@ void PrepareStackTransfers(const ValueKindSig* sig, ...@@ -835,8 +834,7 @@ void PrepareStackTransfers(const ValueKindSig* sig,
} }
} else { } else {
DCHECK(loc.IsCallerFrameSlot()); DCHECK(loc.IsCallerFrameSlot());
int param_offset = -loc.GetLocation() - 1; stack_slots->Add(slot, stack_offset, half);
stack_slots->Add(slot, stack_offset, half, param_offset);
} }
} }
} }
...@@ -853,10 +851,10 @@ void LiftoffAssembler::PrepareBuiltinCall( ...@@ -853,10 +851,10 @@ void LiftoffAssembler::PrepareBuiltinCall(
PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots, PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots,
&stack_transfers, &param_regs); &stack_transfers, &param_regs);
SpillAllRegisters(); SpillAllRegisters();
int param_slots = static_cast<int>(call_descriptor->StackParameterCount()); // Create all the slots.
if (param_slots > 0) { // Builtin stack parameters are pushed in reversed order.
stack_slots.Construct(param_slots); stack_slots.Reverse();
} stack_slots.Construct();
// Execute the stack transfers before filling the instance register. // Execute the stack transfers before filling the instance register.
stack_transfers.Execute(); stack_transfers.Execute();
...@@ -899,7 +897,6 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig, ...@@ -899,7 +897,6 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
LiftoffRegister(*target_instance), kIntPtr); LiftoffRegister(*target_instance), kIntPtr);
} }
int param_slots = static_cast<int>(call_descriptor->StackParameterCount());
if (num_params) { if (num_params) {
uint32_t param_base = cache_state_.stack_height() - num_params; uint32_t param_base = cache_state_.stack_height() - num_params;
PrepareStackTransfers(sig, call_descriptor, PrepareStackTransfers(sig, call_descriptor,
...@@ -919,16 +916,13 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig, ...@@ -919,16 +916,13 @@ void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
*target = new_target.gp(); *target = new_target.gp();
} else { } else {
stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr, stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
LiftoffRegister(*target), 0), LiftoffRegister(*target), 0));
param_slots);
param_slots++;
*target = no_reg; *target = no_reg;
} }
} }
if (param_slots > 0) { // Create all the slots.
stack_slots.Construct(param_slots); stack_slots.Construct();
}
// Execute the stack transfers before filling the instance register. // Execute the stack transfers before filling the instance register.
stack_transfers.Execute(); stack_transfers.Execute();
// Pop parameters from the value stack. // Pop parameters from the value stack.
......
...@@ -1563,50 +1563,28 @@ class LiftoffStackSlots { ...@@ -1563,50 +1563,28 @@ class LiftoffStackSlots {
LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete; LiftoffStackSlots& operator=(const LiftoffStackSlots&) = delete;
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset, void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half, int dst_slot) { RegPairHalf half) {
DCHECK_LE(0, dst_slot); slots_.emplace_back(src, src_offset, half);
slots_.emplace_back(src, src_offset, half, dst_slot);
} }
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
void Add(const LiftoffAssembler::VarState& src, int dst_slot) { void Reverse() { std::reverse(slots_.begin(), slots_.end()); }
DCHECK_LE(0, dst_slot);
slots_.emplace_back(src, dst_slot);
}
void SortInPushOrder() {
std::sort(slots_.begin(), slots_.end(), [](const Slot& a, const Slot& b) {
return a.dst_slot_ > b.dst_slot_;
});
}
inline void Construct(int param_slots); inline void Construct();
private: private:
// A logical slot, which may occupy multiple stack slots.
struct Slot { struct Slot {
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset, Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half, int dst_slot) RegPairHalf half)
: src_(src), : src_(src), src_offset_(src_offset), half_(half) {}
src_offset_(src_offset), explicit Slot(const LiftoffAssembler::VarState& src)
half_(half), : src_(src), half_(kLowWord) {}
dst_slot_(dst_slot) {}
Slot(const LiftoffAssembler::VarState& src, int dst_slot)
: src_(src), half_(kLowWord), dst_slot_(dst_slot) {}
LiftoffAssembler::VarState src_; LiftoffAssembler::VarState src_;
uint32_t src_offset_ = 0; uint32_t src_offset_ = 0;
RegPairHalf half_; RegPairHalf half_;
int dst_slot_ = 0;
}; };
// Returns the size in bytes of the given logical slot.
static int SlotSizeInBytes(const Slot& slot) {
const ValueKind kind = slot.src_.kind();
if (kind == kS128) return kSimd128Size;
if (kind == kF64) return kDoubleSize;
return kSystemPointerSize;
}
base::SmallVector<Slot, 8> slots_; base::SmallVector<Slot, 8> slots_;
LiftoffAssembler* const asm_; LiftoffAssembler* const asm_;
}; };
......
...@@ -133,26 +133,24 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src, ...@@ -133,26 +133,24 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
} }
} }
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind, inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
int padding = 0) {
switch (kind) { switch (kind) {
case kI32: case kI32:
case kI64: case kI64:
case kRef: case kRef:
case kOptRef: case kOptRef:
assm->AllocateStackSpace(padding);
assm->pushq(reg.gp()); assm->pushq(reg.gp());
break; break;
case kF32: case kF32:
assm->AllocateStackSpace(kSystemPointerSize + padding); assm->AllocateStackSpace(kSystemPointerSize);
assm->Movss(Operand(rsp, 0), reg.fp()); assm->Movss(Operand(rsp, 0), reg.fp());
break; break;
case kF64: case kF64:
assm->AllocateStackSpace(kSystemPointerSize + padding); assm->AllocateStackSpace(kSystemPointerSize);
assm->Movsd(Operand(rsp, 0), reg.fp()); assm->Movsd(Operand(rsp, 0), reg.fp());
break; break;
case kS128: case kS128:
assm->AllocateStackSpace(kSystemPointerSize * 2 + padding); assm->AllocateStackSpace(kSystemPointerSize * 2);
assm->Movdqu(Operand(rsp, 0), reg.fp()); assm->Movdqu(Operand(rsp, 0), reg.fp());
break; break;
default: default:
...@@ -4415,32 +4413,22 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { ...@@ -4415,32 +4413,22 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size)); addq(rsp, Immediate(size));
} }
void LiftoffStackSlots::Construct(int param_slots) { void LiftoffStackSlots::Construct() {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
int last_stack_slot = param_slots;
for (auto& slot : slots_) { for (auto& slot : slots_) {
const int stack_slot = slot.dst_slot_;
int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
last_stack_slot = stack_slot;
const LiftoffAssembler::VarState& src = slot.src_; const LiftoffAssembler::VarState& src = slot.src_;
DCHECK_LT(0, stack_decrement);
switch (src.loc()) { switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: case LiftoffAssembler::VarState::kStack:
if (src.kind() == kI32) { if (src.kind() == kI32) {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// Load i32 values to a register first to ensure they are zero // Load i32 values to a register first to ensure they are zero
// extended. // extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_)); asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister); asm_->pushq(kScratchRegister);
} else if (src.kind() == kS128) { } else if (src.kind() == kS128) {
asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
// Since offsets are subtracted from sp, we need a smaller offset to // Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value. // push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8)); asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_)); asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
} else { } else {
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
// For all other types, just push the whole (8-byte) stack slot. // For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized // This is also ok for f32 values (even though we copy 4 uninitialized
// bytes), because f32 and f64 values are clearly distinguished in // bytes), because f32 and f64 values are clearly distinguished in
...@@ -4448,13 +4436,10 @@ void LiftoffStackSlots::Construct(int param_slots) { ...@@ -4448,13 +4436,10 @@ void LiftoffStackSlots::Construct(int param_slots) {
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_)); asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
} }
break; break;
case LiftoffAssembler::VarState::kRegister: { case LiftoffAssembler::VarState::kRegister:
int pushed = src.kind() == kS128 ? kSimd128Size : kSystemPointerSize; liftoff::push(asm_, src.reg(), src.kind());
liftoff::push(asm_, src.reg(), src.kind(), stack_decrement - pushed);
break; break;
}
case LiftoffAssembler::VarState::kIntConst: case LiftoffAssembler::VarState::kIntConst:
asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
asm_->pushq(Immediate(src.i32_const())); asm_->pushq(Immediate(src.i32_const()));
break; break;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment