Commit d3cd2702 authored by Ng Zhi An, committed by Commit Bot

Reland "[liftoff] Use stack slot offsets instead of indices"

This is a reland of 20727725

The fix is in FillStackSlotsWithZero in liftoff-assembler-arm64.h: in
the else case for bigger counts to fill, the argument passed to Sub was
incorrect. We were passing the offset relative to the first stack slot,
but it should be relative to the instance slot, so the zeroing was off
by one slot and ended up zeroing the stack slot holding the instance.
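
For concreteness, a minimal sketch of the corrected address computation
(illustrative only; the broken form is reconstructed from the description
above, the fixed form matches the arm64 hunk below):

    // Broken: start address computed relative to the first stack slot,
    //     Sub(address_reg, fp, GetStackOffsetFromIndex(last_stack_slot));
    // which leaves out kFirstStackSlotOffset and shifts the zeroed range
    // up into the cached instance slot.
    // Fixed: rebase on the instance via liftoff::GetStackSlotOffset:
    Sub(address_reg, fp,
        liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(last_stack_slot)));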

Original change's description:
> [liftoff] Use stack slot offsets instead of indices
>
> Spill/fill now take offsets instead of indices. We provide a
> helper, GetStackOffsetFromIndex, for callers. This is currently only
> useful while slot sizes are still fixed to 8 bytes.
>
> StackTransferRecipe's RegisterLoad now works in terms of offset.
>
> LiftoffStackSlots work in terms of offset as well.
>
> TransferStackSlot currently still works in terms of indices, but can be
> converted to use offsets in a subsequent change.
>
> Bug: v8:9909
> Change-Id: If54fb844309bdfd641720d063135dd59551813e0
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1922489
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Zhi An Ng <zhin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#65049}

Bug: v8:9909
Change-Id: I311da9d3bb1db8faf8693079177c77a7b3754243
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1925131
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65107}
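
A quick worked example of the index-to-offset translation this CL
introduces (illustrative; assumes kStackSlotSize == 8 as in the header
changes below):

    // LiftoffAssembler::GetStackOffsetFromIndex maps a slot index to a
    // byte offset while slot sizes are still fixed:
    //     offset = index * kStackSlotSize.
    uint32_t offset = LiftoffAssembler::GetStackOffsetFromIndex(2);  // 16

    // Each platform then rebases the offset on the frame pointer, e.g.
    //     GetStackSlotOffset(offset) == kFirstStackSlotOffset + offset,
    //     GetStackSlot(offset) == MemOperand(fp, -GetStackSlotOffset(offset)).

    // RecordUsedSpillSlot converts back while the migration is in flight:
    //     index = offset / kStackSlotSize.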
parent f6a76fad
......@@ -46,20 +46,18 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
}
inline MemOperand GetInstanceOperand() {
......@@ -539,12 +537,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
}
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_index, src_index);
DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
......@@ -564,17 +562,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
str(reg.gp(), dst);
break;
case kWasmI64:
str(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
str(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
case kWasmF32:
vstr(liftoff::GetFloatRegister(reg.fp()), dst);
......@@ -587,9 +585,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this);
Register src = no_reg;
// The scratch register will be required by str if multiple instructions
......@@ -607,10 +605,10 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: {
int32_t low_word = value.to_i64();
mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(index, kLowWord));
str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
int32_t high_word = value.to_i64() >> 32;
mov(src, Operand(high_word));
str(src, liftoff::GetHalfStackSlot(index, kHighWord));
str(src, liftoff::GetHalfStackSlot(offset, kHighWord));
break;
}
default:
......@@ -619,36 +617,36 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
switch (type) {
case kWasmI32:
ldr(reg.gp(), liftoff::GetStackSlot(index));
ldr(reg.gp(), liftoff::GetStackSlot(offset));
break;
case kWasmI64:
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
case kWasmF32:
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(index));
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(offset));
break;
case kWasmF64:
vldr(reg.fp(), liftoff::GetStackSlot(index));
vldr(reg.fp(), liftoff::GetStackSlot(offset));
break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) {
ldr(reg, liftoff::GetHalfStackSlot(index, half));
ldr(reg, liftoff::GetHalfStackSlot(offset, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
// We need a zero reg. Always use r0 for that, and push it before to restore
// its value afterwards.
......@@ -659,16 +657,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kLowWord));
str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r1 for start address (inclusive), r2 for end address (exclusive).
push(r1);
push(r2);
sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
sub(r1, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
sub(r2, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop;
bind(&loop);
......@@ -1634,13 +1638,13 @@ void LiftoffStackSlots::Construct() {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->Push(scratch);
} break;
case kWasmF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_index_));
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch);
} break;
default:
......
......@@ -43,12 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
}
inline MemOperand GetInstanceOperand() {
......@@ -327,12 +327,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
UseScratchRegisterScope temps(this);
CPURegister scratch = liftoff::AcquireByType(&temps, type);
Ldr(scratch, liftoff::GetStackSlot(src_index));
Str(scratch, liftoff::GetStackSlot(dst_index));
Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_offset));
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
......@@ -354,16 +354,16 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
Str(liftoff::GetRegFromType(reg, type), dst);
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg();
switch (value.type()) {
......@@ -390,37 +390,42 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
Str(src, dst);
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(index);
MemOperand src = liftoff::GetStackSlot(offset);
Ldr(liftoff::GetRegFromType(reg, type), src);
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
int max_stp_offset = -liftoff::GetStackSlotOffset(index + count - 1);
int max_stp_offset =
-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index + count - 1));
if (count <= 12 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per two slots (<= 6 instructions total).
for (; count > 1; count -= 2) {
STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1));
stp(xzr, xzr,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + count - 1)));
}
DCHECK(count == 0 || count == 1);
if (count) str(xzr, liftoff::GetStackSlot(index));
if (count) {
str(xzr, liftoff::GetStackSlot(GetStackOffsetFromIndex(index)));
}
} else {
// General case for bigger counts (5-8 instructions).
UseScratchRegisterScope temps(this);
Register address_reg = temps.AcquireX();
// This {Sub} might use another temp register if the offset is too large.
Sub(address_reg, fp, liftoff::GetStackSlotOffset(last_stack_slot));
Sub(address_reg, fp,
liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(last_stack_slot)));
Register count_reg = temps.AcquireX();
Mov(count_reg, count);
......@@ -1104,7 +1109,7 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_index_));
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Poke(scratch, poke_offset);
break;
}
......
......@@ -26,16 +26,14 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) {
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = index * LiftoffAssembler::kStackSlotSize - half_offset;
return Operand(ebp, -kFirstStackSlotOffset - offset);
return Operand(ebp, -kFirstStackSlotOffset - offset + half_offset);
}
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
......@@ -406,18 +404,18 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
if (needs_reg_pair(type)) {
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_index, kLowWord),
liftoff::GetHalfStackSlot(dst_index, kLowWord));
liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_offset, kLowWord));
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_index, kHighWord),
liftoff::GetHalfStackSlot(dst_index, kHighWord));
liftoff::GetHalfStackSlot(src_offset, kHighWord),
liftoff::GetHalfStackSlot(dst_offset, kHighWord));
} else {
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_index),
liftoff::GetStackSlot(dst_index));
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
liftoff::GetStackSlot(dst_offset));
}
}
......@@ -438,17 +436,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
mov(dst, reg.gp());
break;
case kWasmI64:
mov(liftoff::GetHalfStackSlot(index, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(index, kHighWord), reg.high_gp());
mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
......@@ -461,9 +459,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) {
case kWasmI32:
mov(dst, Immediate(value.to_i32()));
......@@ -471,8 +469,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(index, kLowWord), Immediate(low_word));
mov(liftoff::GetHalfStackSlot(index, kHighWord), Immediate(high_word));
mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
mov(liftoff::GetHalfStackSlot(offset, kHighWord), Immediate(high_word));
break;
}
default:
......@@ -481,16 +479,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
Operand src = liftoff::GetStackSlot(index);
Operand src = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
mov(reg.gp(), src);
break;
case kWasmI64:
mov(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
mov(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
case kWasmF32:
movss(reg.fp(), src);
......@@ -503,22 +501,26 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) {
mov(reg, liftoff::GetHalfStackSlot(index, half));
mov(reg, liftoff::GetHalfStackSlot(offset, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 2) {
// Special straight-line code for up to two slots (6-9 bytes per word:
// C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot).
for (uint32_t offset = 0; offset < count; ++offset) {
mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0));
mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0));
mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kLowWord),
Immediate(0));
mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord),
Immediate(0));
}
} else {
// General case for bigger counts.
......@@ -528,7 +530,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
push(eax);
push(ecx);
push(edi);
lea(edi, liftoff::GetStackSlot(last_stack_slot));
lea(edi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
xor_(eax, eax);
// Number of words is number of slots times two.
mov(ecx, Immediate(count * 2));
......@@ -1931,9 +1933,9 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
}
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
......
......@@ -42,7 +42,7 @@ class StackTransferRecipe {
LoadKind kind;
ValueType type;
int32_t value; // i32 constant value or stack index, depending on kind.
int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors.
static RegisterLoad Const(WasmValue constant) {
......@@ -53,12 +53,12 @@ class StackTransferRecipe {
DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
return {kConstant, kWasmI64, constant.to_i32_unchecked()};
}
static RegisterLoad Stack(int32_t stack_index, ValueType type) {
return {kStack, type, stack_index};
static RegisterLoad Stack(int32_t offset, ValueType type) {
return {kStack, type, offset};
}
static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) {
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
stack_index};
offset};
}
private:
......@@ -91,18 +91,23 @@ class StackTransferRecipe {
switch (src.loc()) {
case VarState::kStack:
if (src_index == dst_index) break;
asm_->MoveStackValue(dst_index, src_index, src.type());
asm_->MoveStackValue(asm_->GetStackOffsetFromIndex(dst_index),
asm_->GetStackOffsetFromIndex(src_index),
src.type());
break;
case VarState::kRegister:
asm_->Spill(dst_index, src.reg(), src.type());
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index), src.reg(),
src.type());
break;
case VarState::kIntConst:
asm_->Spill(dst_index, src.constant());
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index),
src.constant());
break;
}
break;
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index);
LoadIntoRegister(dst.reg(), src,
asm_->GetStackOffsetFromIndex(src_index));
break;
case VarState::kIntConst:
DCHECK_EQ(dst, src);
......@@ -112,10 +117,10 @@ class StackTransferRecipe {
void LoadIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t src_index) {
uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_index, src.type());
LoadStackSlot(dst, src_offset, src.type());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
......@@ -129,14 +134,14 @@ class StackTransferRecipe {
void LoadI64HalfIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t index, RegPairHalf half) {
uint32_t offset, RegPairHalf half) {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) {
case VarState::kStack:
LoadI64HalfStackSlot(dst, index, half);
LoadI64HalfStackSlot(dst, offset, half);
break;
case VarState::kRegister: {
LiftoffRegister src_half =
......@@ -194,7 +199,7 @@ class StackTransferRecipe {
}
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
ValueType type) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
......@@ -206,15 +211,15 @@ class StackTransferRecipe {
if (dst.is_pair()) {
DCHECK_EQ(kWasmI64, type);
*register_load(dst.low()) =
RegisterLoad::HalfStack(stack_index, kLowWord);
RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) =
RegisterLoad::HalfStack(stack_index, kHighWord);
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else {
*register_load(dst) = RegisterLoad::Stack(stack_index, type);
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
}
}
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t stack_index,
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t offset,
RegPairHalf half) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
......@@ -223,7 +228,7 @@ class StackTransferRecipe {
return;
}
load_dst_regs_.set(dst);
*register_load(dst) = RegisterLoad::HalfStack(stack_index, half);
*register_load(dst) = RegisterLoad::HalfStack(offset, half);
}
private:
......@@ -294,9 +299,11 @@ class StackTransferRecipe {
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
LiftoffRegister spill_reg = move->src;
asm_->Spill(next_spill_slot, spill_reg, move->type);
asm_->Spill(LiftoffAssembler::GetStackOffsetFromIndex(next_spill_slot),
spill_reg, move->type);
// Remember to reload into the destination register later.
LoadStackSlot(dst, next_spill_slot, move->type);
LoadStackSlot(dst, asm_->GetStackOffsetFromIndex(next_spill_slot),
move->type);
++next_spill_slot;
ClearExecutedMove(dst);
}
......@@ -497,7 +504,8 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
case VarState::kStack: {
LiftoffRegister reg =
GetUnusedRegister(reg_class_for(slot.type()), pinned);
Fill(reg, cache_state_.stack_height(), slot.type());
Fill(reg, GetStackOffsetFromIndex(cache_state_.stack_height()),
slot.type());
return reg;
}
case VarState::kRegister:
......@@ -554,11 +562,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack:
return;
case VarState::kRegister:
Spill(index, slot.reg(), slot.type());
Spill(GetStackOffsetFromIndex(index), slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
case VarState::kIntConst:
Spill(index, slot.constant());
Spill(GetStackOffsetFromIndex(index), slot.constant());
break;
}
slot.MakeStack();
......@@ -574,7 +582,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
Spill(i, slot.reg(), slot.type());
Spill(GetStackOffsetFromIndex(i), slot.reg(), slot.type());
slot.MakeStack();
}
cache_state_.reset_used_registers();
......@@ -594,7 +602,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
Spill(idx, slot.reg(), slot.type());
Spill(GetStackOffsetFromIndex(idx), slot.reg(), slot.type());
slot.MakeStack();
}
......@@ -628,6 +636,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
const bool is_pair = kNeedI64RegPair && type == kWasmI64;
const int num_lowered_params = is_pair ? 2 : 1;
const uint32_t stack_idx = param_base + param;
const uint32_t stack_offset = GetStackOffsetFromIndex(stack_idx);
const VarState& slot = cache_state_.stack_state[stack_idx];
// Process both halfs of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
......@@ -654,13 +663,14 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
#endif
param_regs.set(reg);
if (is_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
half);
} else {
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
stack_transfers.LoadIntoRegister(reg, slot, stack_offset);
}
} else {
DCHECK(loc.IsCallerFrameSlot());
stack_slots.Add(slot, stack_idx, half);
stack_slots.Add(slot, stack_offset, half);
}
}
}
......@@ -765,8 +775,9 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
: reg_class_for(return_type) == kGpReg
? LiftoffRegister(kGpReturnRegisters[0])
: LiftoffRegister(kFpReturnRegisters[0]);
stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
cache_state_.stack_height() - 1);
stack_transfers.LoadIntoRegister(
return_reg, cache_state_.stack_state.back(),
GetStackOffsetFromIndex(cache_state_.stack_height() - 1));
}
#ifdef ENABLE_SLOW_DCHECKS
......@@ -820,7 +831,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high());
}
Spill(idx, slot->reg(), slot->type());
Spill(GetStackOffsetFromIndex(idx), slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
......
......@@ -38,6 +38,16 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
// TODO(zhin): Temporary while migrating away from fixed slot sizes.
inline static constexpr uint32_t SlotSizeForType(ValueType type) {
return kStackSlotSize;
}
// TODO(zhin): Temporary for migration from index to offset.
inline static uint32_t GetStackOffsetFromIndex(uint32_t index) {
return index * LiftoffAssembler::kStackSlotSize;
}
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
......@@ -103,6 +113,18 @@ class LiftoffAssembler : public TurboAssembler {
void MakeStack() { loc_ = kStack; }
// Copy src to this, except for offset, since src and this could have been
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
type_ = src.type();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
i32_const_ = src.i32_const();
}
}
private:
Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each
......@@ -259,29 +281,29 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
uint32_t NextSpillOffset() {
uint32_t NextSpillOffset(ValueType type) {
if (cache_state_.stack_state.empty()) {
return 0;
return SlotSizeForType(type);
}
VarState last = cache_state_.stack_state.back();
uint32_t offset =
last.offset() + ValueTypes::ElementSizeInBytes(last.type());
uint32_t offset = last.offset() + SlotSizeForType(type);
return offset;
}
void PushRegister(ValueType type, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class());
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset());
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
}
void PushConstant(ValueType type, int32_t i32_const) {
DCHECK(type == kWasmI32 || type == kWasmI64);
cache_state_.stack_state.emplace_back(type, i32_const, NextSpillOffset());
cache_state_.stack_state.emplace_back(type, i32_const,
NextSpillOffset(type));
}
void PushStack(ValueType type) {
cache_state_.stack_state.emplace_back(type, NextSpillOffset());
cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
}
void SpillRegister(LiftoffRegister);
......@@ -333,7 +355,9 @@ class LiftoffAssembler : public TurboAssembler {
// Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillSlot(uint32_t index) {
void RecordUsedSpillSlot(uint32_t offset) {
// TODO(zhin): Temporary for migration from index to offset.
uint32_t index = offset / kStackSlotSize;
if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
}
......@@ -401,17 +425,18 @@ class LiftoffAssembler : public TurboAssembler {
bool is_store_mem = false);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType);
inline void Move(Register dst, Register src, ValueType);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
inline void Spill(uint32_t offset, LiftoffRegister, ValueType);
inline void Spill(uint32_t offset, WasmValue);
inline void Fill(LiftoffRegister, uint32_t offset, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t index, RegPairHalf);
inline void FillI64Half(Register, uint32_t offset, RegPairHalf);
inline void FillStackSlotsWithZero(uint32_t index, uint32_t count);
// i32 binops.
......@@ -810,9 +835,9 @@ class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) {
slots_.emplace_back(src, src_index, half);
slots_.emplace_back(src, src_offset, half);
}
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
......@@ -822,14 +847,14 @@ class LiftoffStackSlots {
struct Slot {
// Allow move construction.
Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half)
: src_(src), src_index_(src_index), half_(half) {}
: src_(src), src_offset_(src_offset), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), half_(kLowWord) {}
const LiftoffAssembler::VarState src_;
uint32_t src_index_ = 0;
uint32_t src_offset_ = 0;
RegPairHalf half_;
};
......
......@@ -1292,7 +1292,7 @@ class LiftoffCompiler {
case kStack: {
auto rc = reg_class_for(imm.type);
LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, imm.index, imm.type);
__ Fill(reg, __ GetStackOffsetFromIndex(imm.index), imm.type);
__ PushRegister(slot.type(), reg);
break;
}
......@@ -1306,7 +1306,8 @@ class LiftoffCompiler {
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), state.stack_height() - 1, type);
__ Fill(dst_slot->reg(),
__ GetStackOffsetFromIndex(state.stack_height() - 1), type);
return;
}
state.dec_used(slot_reg);
......@@ -1315,7 +1316,9 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
__ Fill(dst_reg,
__ GetStackOffsetFromIndex(__ cache_state()->stack_height() - 1),
type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
......@@ -1327,12 +1330,12 @@ class LiftoffCompiler {
switch (source_slot.loc()) {
case kRegister:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot;
target_slot.Copy(source_slot);
if (is_tee) state.inc_used(target_slot.reg());
break;
case kIntConst:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot;
target_slot.Copy(source_slot);
break;
case kStack:
LocalSetFromStackSlot(&target_slot, local_index);
......
......@@ -49,19 +49,18 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset;
return MemOperand(fp, -kFirstStackSlotOffset - offset);
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
......@@ -522,12 +521,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, fp, offset, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_index, src_index);
DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
......@@ -541,17 +540,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
sw(reg.gp(), dst);
break;
case kWasmI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
case kWasmF32:
swc1(reg.fp(), dst);
......@@ -564,9 +563,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) {
case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
......@@ -582,8 +581,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word));
sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
}
default:
......@@ -593,16 +592,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(index);
MemOperand src = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
lw(reg.gp(), src);
break;
case kWasmI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
case kWasmF32:
lwc1(reg.fp(), src);
......@@ -615,28 +614,33 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) {
lw(reg, liftoff::GetHalfStackSlot(index, half));
lw(reg, liftoff::GetHalfStackSlot(offset, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<=12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) {
Sw(zero_reg, liftoff::GetStackSlot(index + offset));
Sw(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
}
} else {
// General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0);
Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
Addu(a0, fp,
Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Addu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Label loop;
bind(&loop);
......@@ -1610,11 +1614,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
asm_->push(kScratchReg);
}
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->push(kScratchReg);
break;
}
......
......@@ -45,12 +45,12 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
......@@ -437,12 +437,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_index, src_index);
DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
......@@ -457,10 +457,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
Sw(reg.gp(), dst);
......@@ -479,9 +479,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) {
case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
......@@ -502,9 +502,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(index);
MemOperand src = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
Lw(reg.gp(), src);
......@@ -523,28 +523,32 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<= 12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) {
Sd(zero_reg, liftoff::GetStackSlot(index + offset));
Sd(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
}
} else {
// General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0);
Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
Daddu(a0, fp,
Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Daddu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Label loop;
bind(&loop);
......@@ -1399,7 +1403,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_));
asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg);
break;
case LiftoffAssembler::VarState::kRegister:
......
......@@ -41,16 +41,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
}
} // namespace liftoff
......@@ -119,7 +117,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
......@@ -133,21 +131,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
......@@ -165,16 +163,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r4 for start address (inclusive), r5 for end address (exclusive).
push(r4);
push(r5);
subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
subi(r4, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
subi(r5, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop;
bind(&loop);
......
......@@ -40,16 +40,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
}
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
}
} // namespace liftoff
......@@ -118,7 +116,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
......@@ -132,21 +130,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
......@@ -164,16 +162,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
SubP(r3, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
SubP(r4, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop;
bind(&loop);
......
......@@ -39,8 +39,7 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) {
return Operand(rbp, -kFirstStackSlotOffset - offset);
}
......@@ -339,11 +338,11 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_index, src_index);
Operand src = liftoff::GetStackSlot(src_index);
Operand dst = liftoff::GetStackSlot(dst_index);
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
if (ValueTypes::ElementSizeLog2Of(type) == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
......@@ -375,10 +374,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
movl(dst, reg.gp());
......@@ -397,9 +396,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) {
case kWasmI32:
movl(dst, Immediate(value.to_i32()));
......@@ -424,9 +423,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
Operand src = liftoff::GetStackSlot(index);
Operand src = liftoff::GetStackSlot(offset);
switch (type) {
case kWasmI32:
movl(reg.gp(), src);
......@@ -445,20 +444,22 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot);
RecordUsedSpillSlot(
LiftoffAssembler::GetStackOffsetFromIndex(last_stack_slot));
if (count <= 3) {
// Special straight-line code for up to three slots
// (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>).
for (uint32_t offset = 0; offset < count; ++offset) {
movq(liftoff::GetStackSlot(index + offset), Immediate(0));
movq(liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)),
Immediate(0));
}
} else {
// General case for bigger counts.
......@@ -468,7 +469,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
pushq(rax);
pushq(rcx);
pushq(rdi);
leaq(rdi, liftoff::GetStackSlot(last_stack_slot));
leaq(rdi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
xorl(rax, rax);
movl(rcx, Immediate(count));
repstosq();
......@@ -1663,14 +1664,14 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmI32) {
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_index_));
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
} else {
// For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized
// bytes), because f32 and f64 values are clearly distinguished in
// Turbofan, so the uninitialized bytes are never accessed.
asm_->pushq(liftoff::GetStackSlot(slot.src_index_));
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
}
break;
case LiftoffAssembler::VarState::kRegister:
......
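
With the migration helper in place, a caller-side sketch of the new
contract (hypothetical call site, not from this CL): convert a slot index
to a byte offset once, then pass offsets everywhere:

    uint32_t offset = LiftoffAssembler::GetStackOffsetFromIndex(index);
    asm_->Spill(offset, reg, type);   // was Spill(index, reg, type)
    asm_->Fill(reg, offset, type);    // was Fill(reg, index, type)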