Commit d3cd2702 authored by Ng Zhi An, committed by Commit Bot

Reland "[liftoff] Use stack slot offsets instead of indices"

This is a reland of 20727725

The fix is in liftoff-assembler-arm64.h in FillStackSlotsWithZero,
in the else case for bigger counts to fill, the argument passed to Sub
was incorrect. We were passing the offset relative to the first slot, but it
should be the offset relative to the instance, so there was an off-by-one-slot
error when zeroing, which ended up zeroing the stack slot holding the instance.

Original change's description:
> [liftoff] Use stack slot offsets instead of indices
>
> Spill/fill now take offsets instead of indices. We provide a
> helper, GetStackOffsetFromIndex, for callers. This is currently only
> useful while slot sizes are still fixed to 8 bytes.
>
> StackTransferRecipe's RegisterLoad now works in terms of offset.
>
> LiftoffStackSlots work in terms of offset as well.
>
> TransferStackSlot currently still works in terms of indices, but can be
> converted to use offsets in a subsequent change.
>
> Bug: v8:9909
> Change-Id: If54fb844309bdfd641720d063135dd59551813e0
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1922489
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Zhi An Ng <zhin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#65049}

Bug: v8:9909
Change-Id: I311da9d3bb1db8faf8693079177c77a7b3754243
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1925131
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65107}
parent f6a76fad
...@@ -46,20 +46,18 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize; ...@@ -46,20 +46,18 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub. // Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3; constexpr int32_t kPatchInstructionsRequired = 3;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t index) { inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(index)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset + return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
} }
inline MemOperand GetInstanceOperand() { inline MemOperand GetInstanceOperand() {
...@@ -539,12 +537,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -539,12 +537,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
} }
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
DCHECK_NE(dst_index, src_index); DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type); Fill(reg, src_offset, type);
Spill(dst_index, reg, type); Spill(dst_offset, reg, type);
} }
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
...@@ -564,17 +562,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -564,17 +562,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
str(reg.gp(), dst); str(reg.gp(), dst);
break; break;
case kWasmI64: case kWasmI64:
str(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); str(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); str(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
case kWasmF32: case kWasmF32:
vstr(liftoff::GetFloatRegister(reg.fp()), dst); vstr(liftoff::GetFloatRegister(reg.fp()), dst);
...@@ -587,9 +585,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, ...@@ -587,9 +585,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register src = no_reg; Register src = no_reg;
// The scratch register will be required by str if multiple instructions // The scratch register will be required by str if multiple instructions
...@@ -607,10 +605,10 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -607,10 +605,10 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: { case kWasmI64: {
int32_t low_word = value.to_i64(); int32_t low_word = value.to_i64();
mov(src, Operand(low_word)); mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(index, kLowWord)); str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
int32_t high_word = value.to_i64() >> 32; int32_t high_word = value.to_i64() >> 32;
mov(src, Operand(high_word)); mov(src, Operand(high_word));
str(src, liftoff::GetHalfStackSlot(index, kHighWord)); str(src, liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
} }
default: default:
...@@ -619,36 +617,36 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -619,36 +617,36 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
ldr(reg.gp(), liftoff::GetStackSlot(index)); ldr(reg.gp(), liftoff::GetStackSlot(offset));
break; break;
case kWasmI64: case kWasmI64:
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); ldr(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); ldr(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
case kWasmF32: case kWasmF32:
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(index)); vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(offset));
break; break;
case kWasmF64: case kWasmF64:
vldr(reg.fp(), liftoff::GetStackSlot(index)); vldr(reg.fp(), liftoff::GetStackSlot(offset));
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) { RegPairHalf half) {
ldr(reg, liftoff::GetHalfStackSlot(index, half)); ldr(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
// We need a zero reg. Always use r0 for that, and push it before to restore // We need a zero reg. Always use r0 for that, and push it before to restore
// its value afterwards. // its value afterwards.
...@@ -659,16 +657,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { ...@@ -659,16 +657,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two // Special straight-line code for up to five slots. Generates two
// instructions per slot. // instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); kLowWord));
str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord));
} }
} else { } else {
// General case for bigger counts (9 instructions). // General case for bigger counts (9 instructions).
// Use r1 for start address (inclusive), r2 for end address (exclusive). // Use r1 for start address (inclusive), r2 for end address (exclusive).
push(r1); push(r1);
push(r2); push(r2);
sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); sub(r1, fp,
sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize)); Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
sub(r2, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop; Label loop;
bind(&loop); bind(&loop);
...@@ -1634,13 +1638,13 @@ void LiftoffStackSlots::Construct() { ...@@ -1634,13 +1638,13 @@ void LiftoffStackSlots::Construct() {
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
asm_->ldr(scratch, asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_)); liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->Push(scratch); asm_->Push(scratch);
} break; } break;
case kWasmF64: { case kWasmF64: {
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD(); DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_index_)); asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch); asm_->vpush(scratch);
} break; } break;
default: default:
......
...@@ -43,12 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; ...@@ -43,12 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize; constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0; constexpr int32_t kConstantStackSpace = 0;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t index) { inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(index)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetInstanceOperand() { inline MemOperand GetInstanceOperand() {
...@@ -327,12 +327,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -327,12 +327,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset)); Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
CPURegister scratch = liftoff::AcquireByType(&temps, type); CPURegister scratch = liftoff::AcquireByType(&temps, type);
Ldr(scratch, liftoff::GetStackSlot(src_index)); Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_index)); Str(scratch, liftoff::GetStackSlot(dst_offset));
} }
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
...@@ -354,16 +354,16 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -354,16 +354,16 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
Str(liftoff::GetRegFromType(reg, type), dst); Str(liftoff::GetRegFromType(reg, type), dst);
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg(); CPURegister src = CPURegister::no_reg();
switch (value.type()) { switch (value.type()) {
...@@ -390,37 +390,42 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -390,37 +390,42 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
Str(src, dst); Str(src, dst);
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
MemOperand src = liftoff::GetStackSlot(index); MemOperand src = liftoff::GetStackSlot(offset);
Ldr(liftoff::GetRegFromType(reg, type), src); Ldr(liftoff::GetRegFromType(reg, type), src);
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
int max_stp_offset = -liftoff::GetStackSlotOffset(index + count - 1); int max_stp_offset =
-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index + count - 1));
if (count <= 12 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) { if (count <= 12 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
// Special straight-line code for up to 12 slots. Generates one // Special straight-line code for up to 12 slots. Generates one
// instruction per two slots (<= 6 instructions total). // instruction per two slots (<= 6 instructions total).
for (; count > 1; count -= 2) { for (; count > 1; count -= 2) {
STATIC_ASSERT(kStackSlotSize == kSystemPointerSize); STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1)); stp(xzr, xzr,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + count - 1)));
} }
DCHECK(count == 0 || count == 1); DCHECK(count == 0 || count == 1);
if (count) str(xzr, liftoff::GetStackSlot(index)); if (count) {
str(xzr, liftoff::GetStackSlot(GetStackOffsetFromIndex(index)));
}
} else { } else {
// General case for bigger counts (5-8 instructions). // General case for bigger counts (5-8 instructions).
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register address_reg = temps.AcquireX(); Register address_reg = temps.AcquireX();
// This {Sub} might use another temp register if the offset is too large. // This {Sub} might use another temp register if the offset is too large.
Sub(address_reg, fp, liftoff::GetStackSlotOffset(last_stack_slot)); Sub(address_reg, fp,
liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(last_stack_slot)));
Register count_reg = temps.AcquireX(); Register count_reg = temps.AcquireX();
Mov(count_reg, count); Mov(count_reg, count);
...@@ -1104,7 +1109,7 @@ void LiftoffStackSlots::Construct() { ...@@ -1104,7 +1109,7 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack: { case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_); UseScratchRegisterScope temps(asm_);
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type()); CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_index_)); asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Poke(scratch, poke_offset); asm_->Poke(scratch, poke_offset);
break; break;
} }
......
...@@ -26,16 +26,14 @@ constexpr int32_t kConstantStackSpace = 8; ...@@ -26,16 +26,14 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize; kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t index) { inline Operand GetStackSlot(uint32_t offset) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
return Operand(ebp, -kFirstStackSlotOffset - offset); return Operand(ebp, -kFirstStackSlotOffset - offset);
} }
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = index * LiftoffAssembler::kStackSlotSize - half_offset; return Operand(ebp, -kFirstStackSlotOffset - offset + half_offset);
return Operand(ebp, -kFirstStackSlotOffset - offset);
} }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
...@@ -406,18 +404,18 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -406,18 +404,18 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
type); type);
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
if (needs_reg_pair(type)) { if (needs_reg_pair(type)) {
liftoff::MoveStackValue(this, liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_index, kLowWord), liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_index, kLowWord)); liftoff::GetHalfStackSlot(dst_offset, kLowWord));
liftoff::MoveStackValue(this, liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_index, kHighWord), liftoff::GetHalfStackSlot(src_offset, kHighWord),
liftoff::GetHalfStackSlot(dst_index, kHighWord)); liftoff::GetHalfStackSlot(dst_offset, kHighWord));
} else { } else {
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_index), liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
liftoff::GetStackSlot(dst_index)); liftoff::GetStackSlot(dst_offset));
} }
} }
...@@ -438,17 +436,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -438,17 +436,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(index); Operand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
mov(dst, reg.gp()); mov(dst, reg.gp());
break; break;
case kWasmI64: case kWasmI64:
mov(liftoff::GetHalfStackSlot(index, kLowWord), reg.low_gp()); mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(index, kHighWord), reg.high_gp()); mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
break; break;
case kWasmF32: case kWasmF32:
movss(dst, reg.fp()); movss(dst, reg.fp());
...@@ -461,9 +459,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, ...@@ -461,9 +459,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(index); Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
case kWasmI32: case kWasmI32:
mov(dst, Immediate(value.to_i32())); mov(dst, Immediate(value.to_i32()));
...@@ -471,8 +469,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -471,8 +469,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
case kWasmI64: { case kWasmI64: {
int32_t low_word = value.to_i64(); int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32; int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(index, kLowWord), Immediate(low_word)); mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
mov(liftoff::GetHalfStackSlot(index, kHighWord), Immediate(high_word)); mov(liftoff::GetHalfStackSlot(offset, kHighWord), Immediate(high_word));
break; break;
} }
default: default:
...@@ -481,16 +479,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -481,16 +479,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
Operand src = liftoff::GetStackSlot(index); Operand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
mov(reg.gp(), src); mov(reg.gp(), src);
break; break;
case kWasmI64: case kWasmI64:
mov(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); mov(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); mov(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
case kWasmF32: case kWasmF32:
movss(reg.fp(), src); movss(reg.fp(), src);
...@@ -503,22 +501,26 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, ...@@ -503,22 +501,26 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) { RegPairHalf half) {
mov(reg, liftoff::GetHalfStackSlot(index, half)); mov(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 2) { if (count <= 2) {
// Special straight-line code for up to two slots (6-9 bytes per word: // Special straight-line code for up to two slots (6-9 bytes per word:
// C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot). // C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot).
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0)); mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0)); kLowWord),
Immediate(0));
mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord),
Immediate(0));
} }
} else { } else {
// General case for bigger counts. // General case for bigger counts.
...@@ -528,7 +530,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { ...@@ -528,7 +530,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
push(eax); push(eax);
push(ecx); push(ecx);
push(edi); push(edi);
lea(edi, liftoff::GetStackSlot(last_stack_slot)); lea(edi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
xor_(eax, eax); xor_(eax, eax);
// Number of words is number of slots times two. // Number of words is number of slots times two.
mov(ecx, Immediate(count * 2)); mov(ecx, Immediate(count * 2));
...@@ -1931,9 +1933,9 @@ void LiftoffStackSlots::Construct() { ...@@ -1931,9 +1933,9 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack: case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) { if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_); DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, kHighWord)); asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
} }
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, slot.half_)); asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break; break;
case LiftoffAssembler::VarState::kRegister: case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) { if (src.type() == kWasmI64) {
......
...@@ -42,7 +42,7 @@ class StackTransferRecipe { ...@@ -42,7 +42,7 @@ class StackTransferRecipe {
LoadKind kind; LoadKind kind;
ValueType type; ValueType type;
int32_t value; // i32 constant value or stack index, depending on kind. int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors. // Named constructors.
static RegisterLoad Const(WasmValue constant) { static RegisterLoad Const(WasmValue constant) {
...@@ -53,12 +53,12 @@ class StackTransferRecipe { ...@@ -53,12 +53,12 @@ class StackTransferRecipe {
DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked()); DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
return {kConstant, kWasmI64, constant.to_i32_unchecked()}; return {kConstant, kWasmI64, constant.to_i32_unchecked()};
} }
static RegisterLoad Stack(int32_t stack_index, ValueType type) { static RegisterLoad Stack(int32_t offset, ValueType type) {
return {kStack, type, stack_index}; return {kStack, type, offset};
} }
static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) { static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32, return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
stack_index}; offset};
} }
private: private:
...@@ -91,18 +91,23 @@ class StackTransferRecipe { ...@@ -91,18 +91,23 @@ class StackTransferRecipe {
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
if (src_index == dst_index) break; if (src_index == dst_index) break;
asm_->MoveStackValue(dst_index, src_index, src.type()); asm_->MoveStackValue(asm_->GetStackOffsetFromIndex(dst_index),
asm_->GetStackOffsetFromIndex(src_index),
src.type());
break; break;
case VarState::kRegister: case VarState::kRegister:
asm_->Spill(dst_index, src.reg(), src.type()); asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index), src.reg(),
src.type());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
asm_->Spill(dst_index, src.constant()); asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index),
src.constant());
break; break;
} }
break; break;
case VarState::kRegister: case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, src_index); LoadIntoRegister(dst.reg(), src,
asm_->GetStackOffsetFromIndex(src_index));
break; break;
case VarState::kIntConst: case VarState::kIntConst:
DCHECK_EQ(dst, src); DCHECK_EQ(dst, src);
...@@ -112,10 +117,10 @@ class StackTransferRecipe { ...@@ -112,10 +117,10 @@ class StackTransferRecipe {
void LoadIntoRegister(LiftoffRegister dst, void LoadIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src, const LiftoffAssembler::VarState& src,
uint32_t src_index) { uint32_t src_offset) {
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
LoadStackSlot(dst, src_index, src.type()); LoadStackSlot(dst, src_offset, src.type());
break; break;
case VarState::kRegister: case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class()); DCHECK_EQ(dst.reg_class(), src.reg_class());
...@@ -129,14 +134,14 @@ class StackTransferRecipe { ...@@ -129,14 +134,14 @@ class StackTransferRecipe {
void LoadI64HalfIntoRegister(LiftoffRegister dst, void LoadI64HalfIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src, const LiftoffAssembler::VarState& src,
uint32_t index, RegPairHalf half) { uint32_t offset, RegPairHalf half) {
// Use CHECK such that the remaining code is statically dead if // Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false. // {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair); CHECK(kNeedI64RegPair);
DCHECK_EQ(kWasmI64, src.type()); DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
LoadI64HalfStackSlot(dst, index, half); LoadI64HalfStackSlot(dst, offset, half);
break; break;
case VarState::kRegister: { case VarState::kRegister: {
LiftoffRegister src_half = LiftoffRegister src_half =
...@@ -194,7 +199,7 @@ class StackTransferRecipe { ...@@ -194,7 +199,7 @@ class StackTransferRecipe {
} }
} }
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index, void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
ValueType type) { ValueType type) {
if (load_dst_regs_.has(dst)) { if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack // It can happen that we spilled the same register to different stack
...@@ -206,15 +211,15 @@ class StackTransferRecipe { ...@@ -206,15 +211,15 @@ class StackTransferRecipe {
if (dst.is_pair()) { if (dst.is_pair()) {
DCHECK_EQ(kWasmI64, type); DCHECK_EQ(kWasmI64, type);
*register_load(dst.low()) = *register_load(dst.low()) =
RegisterLoad::HalfStack(stack_index, kLowWord); RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) = *register_load(dst.high()) =
RegisterLoad::HalfStack(stack_index, kHighWord); RegisterLoad::HalfStack(stack_offset, kHighWord);
} else { } else {
*register_load(dst) = RegisterLoad::Stack(stack_index, type); *register_load(dst) = RegisterLoad::Stack(stack_offset, type);
} }
} }
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t stack_index, void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t offset,
RegPairHalf half) { RegPairHalf half) {
if (load_dst_regs_.has(dst)) { if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack // It can happen that we spilled the same register to different stack
...@@ -223,7 +228,7 @@ class StackTransferRecipe { ...@@ -223,7 +228,7 @@ class StackTransferRecipe {
return; return;
} }
load_dst_regs_.set(dst); load_dst_regs_.set(dst);
*register_load(dst) = RegisterLoad::HalfStack(stack_index, half); *register_load(dst) = RegisterLoad::HalfStack(offset, half);
} }
private: private:
...@@ -294,9 +299,11 @@ class StackTransferRecipe { ...@@ -294,9 +299,11 @@ class StackTransferRecipe {
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet(); LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst); RegisterMove* move = register_move(dst);
LiftoffRegister spill_reg = move->src; LiftoffRegister spill_reg = move->src;
asm_->Spill(next_spill_slot, spill_reg, move->type); asm_->Spill(LiftoffAssembler::GetStackOffsetFromIndex(next_spill_slot),
spill_reg, move->type);
// Remember to reload into the destination register later. // Remember to reload into the destination register later.
LoadStackSlot(dst, next_spill_slot, move->type); LoadStackSlot(dst, asm_->GetStackOffsetFromIndex(next_spill_slot),
move->type);
++next_spill_slot; ++next_spill_slot;
ClearExecutedMove(dst); ClearExecutedMove(dst);
} }
...@@ -497,7 +504,8 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) { ...@@ -497,7 +504,8 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
case VarState::kStack: { case VarState::kStack: {
LiftoffRegister reg = LiftoffRegister reg =
GetUnusedRegister(reg_class_for(slot.type()), pinned); GetUnusedRegister(reg_class_for(slot.type()), pinned);
Fill(reg, cache_state_.stack_height(), slot.type()); Fill(reg, GetStackOffsetFromIndex(cache_state_.stack_height()),
slot.type());
return reg; return reg;
} }
case VarState::kRegister: case VarState::kRegister:
...@@ -554,11 +562,11 @@ void LiftoffAssembler::Spill(uint32_t index) { ...@@ -554,11 +562,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack: case VarState::kStack:
return; return;
case VarState::kRegister: case VarState::kRegister:
Spill(index, slot.reg(), slot.type()); Spill(GetStackOffsetFromIndex(index), slot.reg(), slot.type());
cache_state_.dec_used(slot.reg()); cache_state_.dec_used(slot.reg());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
Spill(index, slot.constant()); Spill(GetStackOffsetFromIndex(index), slot.constant());
break; break;
} }
slot.MakeStack(); slot.MakeStack();
...@@ -574,7 +582,7 @@ void LiftoffAssembler::SpillAllRegisters() { ...@@ -574,7 +582,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) { for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i]; auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
Spill(i, slot.reg(), slot.type()); Spill(GetStackOffsetFromIndex(i), slot.reg(), slot.type());
slot.MakeStack(); slot.MakeStack();
} }
cache_state_.reset_used_registers(); cache_state_.reset_used_registers();
...@@ -594,7 +602,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -594,7 +602,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
idx < end; ++idx) { idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx]; VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
Spill(idx, slot.reg(), slot.type()); Spill(GetStackOffsetFromIndex(idx), slot.reg(), slot.type());
slot.MakeStack(); slot.MakeStack();
} }
...@@ -628,6 +636,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -628,6 +636,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
const bool is_pair = kNeedI64RegPair && type == kWasmI64; const bool is_pair = kNeedI64RegPair && type == kWasmI64;
const int num_lowered_params = is_pair ? 2 : 1; const int num_lowered_params = is_pair ? 2 : 1;
const uint32_t stack_idx = param_base + param; const uint32_t stack_idx = param_base + param;
const uint32_t stack_offset = GetStackOffsetFromIndex(stack_idx);
const VarState& slot = cache_state_.stack_state[stack_idx]; const VarState& slot = cache_state_.stack_state[stack_idx];
// Process both halfs of a register pair separately, because they are passed // Process both halfs of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack. // as separate parameters. One or both of them could end up on the stack.
...@@ -654,13 +663,14 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -654,13 +663,14 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
#endif #endif
param_regs.set(reg); param_regs.set(reg);
if (is_pair) { if (is_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half); stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
half);
} else { } else {
stack_transfers.LoadIntoRegister(reg, slot, stack_idx); stack_transfers.LoadIntoRegister(reg, slot, stack_offset);
} }
} else { } else {
DCHECK(loc.IsCallerFrameSlot()); DCHECK(loc.IsCallerFrameSlot());
stack_slots.Add(slot, stack_idx, half); stack_slots.Add(slot, stack_offset, half);
} }
} }
} }
...@@ -765,8 +775,9 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) { ...@@ -765,8 +775,9 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
: reg_class_for(return_type) == kGpReg : reg_class_for(return_type) == kGpReg
? LiftoffRegister(kGpReturnRegisters[0]) ? LiftoffRegister(kGpReturnRegisters[0])
: LiftoffRegister(kFpReturnRegisters[0]); : LiftoffRegister(kFpReturnRegisters[0]);
stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(), stack_transfers.LoadIntoRegister(
cache_state_.stack_height() - 1); return_reg, cache_state_.stack_state.back(),
GetStackOffsetFromIndex(cache_state_.stack_height() - 1));
} }
#ifdef ENABLE_SLOW_DCHECKS #ifdef ENABLE_SLOW_DCHECKS
...@@ -820,7 +831,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) { ...@@ -820,7 +831,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.dec_used(slot->reg().low()); cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high()); cache_state_.dec_used(slot->reg().high());
} }
Spill(idx, slot->reg(), slot->type()); Spill(GetStackOffsetFromIndex(idx), slot->reg(), slot->type());
slot->MakeStack(); slot->MakeStack();
if (--remaining_uses == 0) break; if (--remaining_uses == 0) break;
} }
......
...@@ -38,6 +38,16 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -38,6 +38,16 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr ValueType kWasmIntPtr = static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32; kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
// TODO(zhin): Temporary while migrating away from fixed slot sizes.
inline static constexpr uint32_t SlotSizeForType(ValueType type) {
return kStackSlotSize;
}
// TODO(zhin): Temporary for migration from index to offset.
inline static uint32_t GetStackOffsetFromIndex(uint32_t index) {
return index * LiftoffAssembler::kStackSlotSize;
}
class VarState { class VarState {
public: public:
enum Location : uint8_t { kStack, kRegister, kIntConst }; enum Location : uint8_t { kStack, kRegister, kIntConst };
...@@ -103,6 +113,18 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -103,6 +113,18 @@ class LiftoffAssembler : public TurboAssembler {
void MakeStack() { loc_ = kStack; } void MakeStack() { loc_ = kStack; }
// Copy src to this, except for offset, since src and this could have been
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
type_ = src.type();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
i32_const_ = src.i32_const();
}
}
private: private:
Location loc_; Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each // TODO(wasm): This is redundant, the decoder already knows the type of each
...@@ -259,29 +281,29 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -259,29 +281,29 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister PopToRegister(LiftoffRegList pinned = {}); LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
uint32_t NextSpillOffset() { uint32_t NextSpillOffset(ValueType type) {
if (cache_state_.stack_state.empty()) { if (cache_state_.stack_state.empty()) {
return 0; return SlotSizeForType(type);
} }
VarState last = cache_state_.stack_state.back(); VarState last = cache_state_.stack_state.back();
uint32_t offset = uint32_t offset = last.offset() + SlotSizeForType(type);
last.offset() + ValueTypes::ElementSizeInBytes(last.type());
return offset; return offset;
} }
void PushRegister(ValueType type, LiftoffRegister reg) { void PushRegister(ValueType type, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class()); DCHECK_EQ(reg_class_for(type), reg.reg_class());
cache_state_.inc_used(reg); cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset()); cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
} }
void PushConstant(ValueType type, int32_t i32_const) { void PushConstant(ValueType type, int32_t i32_const) {
DCHECK(type == kWasmI32 || type == kWasmI64); DCHECK(type == kWasmI32 || type == kWasmI64);
cache_state_.stack_state.emplace_back(type, i32_const, NextSpillOffset()); cache_state_.stack_state.emplace_back(type, i32_const,
NextSpillOffset(type));
} }
void PushStack(ValueType type) { void PushStack(ValueType type) {
cache_state_.stack_state.emplace_back(type, NextSpillOffset()); cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
} }
void SpillRegister(LiftoffRegister); void SpillRegister(LiftoffRegister);
...@@ -333,7 +355,9 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -333,7 +355,9 @@ class LiftoffAssembler : public TurboAssembler {
// Call this method whenever spilling something, such that the number of used // Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough. // spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillSlot(uint32_t index) { void RecordUsedSpillSlot(uint32_t offset) {
// TODO(zhin): Temporary for migration from index to offset.
uint32_t index = offset / kStackSlotSize;
if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1; if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
} }
...@@ -401,17 +425,18 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -401,17 +425,18 @@ class LiftoffAssembler : public TurboAssembler {
bool is_store_mem = false); bool is_store_mem = false);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx, inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType); ValueType);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType); inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType);
inline void Move(Register dst, Register src, ValueType); inline void Move(Register dst, Register src, ValueType);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType); inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
inline void Spill(uint32_t index, LiftoffRegister, ValueType); inline void Spill(uint32_t offset, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue); inline void Spill(uint32_t offset, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType); inline void Fill(LiftoffRegister, uint32_t offset, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value. // 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t index, RegPairHalf); inline void FillI64Half(Register, uint32_t offset, RegPairHalf);
inline void FillStackSlotsWithZero(uint32_t index, uint32_t count); inline void FillStackSlotsWithZero(uint32_t index, uint32_t count);
// i32 binops. // i32 binops.
...@@ -810,9 +835,9 @@ class LiftoffStackSlots { ...@@ -810,9 +835,9 @@ class LiftoffStackSlots {
public: public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {} explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
void Add(const LiftoffAssembler::VarState& src, uint32_t src_index, void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) { RegPairHalf half) {
slots_.emplace_back(src, src_index, half); slots_.emplace_back(src, src_offset, half);
} }
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); } void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
...@@ -822,14 +847,14 @@ class LiftoffStackSlots { ...@@ -822,14 +847,14 @@ class LiftoffStackSlots {
struct Slot { struct Slot {
// Allow move construction. // Allow move construction.
Slot(Slot&&) V8_NOEXCEPT = default; Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index, Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half) RegPairHalf half)
: src_(src), src_index_(src_index), half_(half) {} : src_(src), src_offset_(src_offset), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src) explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), half_(kLowWord) {} : src_(src), half_(kLowWord) {}
const LiftoffAssembler::VarState src_; const LiftoffAssembler::VarState src_;
uint32_t src_index_ = 0; uint32_t src_offset_ = 0;
RegPairHalf half_; RegPairHalf half_;
}; };
......
...@@ -1292,7 +1292,7 @@ class LiftoffCompiler { ...@@ -1292,7 +1292,7 @@ class LiftoffCompiler {
case kStack: { case kStack: {
auto rc = reg_class_for(imm.type); auto rc = reg_class_for(imm.type);
LiftoffRegister reg = __ GetUnusedRegister(rc); LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, imm.index, imm.type); __ Fill(reg, __ GetStackOffsetFromIndex(imm.index), imm.type);
__ PushRegister(slot.type(), reg); __ PushRegister(slot.type(), reg);
break; break;
} }
...@@ -1306,7 +1306,8 @@ class LiftoffCompiler { ...@@ -1306,7 +1306,8 @@ class LiftoffCompiler {
if (dst_slot->is_reg()) { if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg(); LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) { if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), state.stack_height() - 1, type); __ Fill(dst_slot->reg(),
__ GetStackOffsetFromIndex(state.stack_height() - 1), type);
return; return;
} }
state.dec_used(slot_reg); state.dec_used(slot_reg);
...@@ -1315,7 +1316,9 @@ class LiftoffCompiler { ...@@ -1315,7 +1316,9 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ local_type(local_index)); DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type); RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc); LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type); __ Fill(dst_reg,
__ GetStackOffsetFromIndex(__ cache_state()->stack_height() - 1),
type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset()); *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg); __ cache_state()->inc_used(dst_reg);
} }
...@@ -1327,12 +1330,12 @@ class LiftoffCompiler { ...@@ -1327,12 +1330,12 @@ class LiftoffCompiler {
switch (source_slot.loc()) { switch (source_slot.loc()) {
case kRegister: case kRegister:
if (target_slot.is_reg()) state.dec_used(target_slot.reg()); if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot; target_slot.Copy(source_slot);
if (is_tee) state.inc_used(target_slot.reg()); if (is_tee) state.inc_used(target_slot.reg());
break; break;
case kIntConst: case kIntConst:
if (target_slot.is_reg()) state.dec_used(target_slot.reg()); if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot = source_slot; target_slot.Copy(source_slot);
break; break;
case kStack: case kStack:
LocalSetFromStackSlot(&target_slot, local_index); LocalSetFromStackSlot(&target_slot, local_index);
......
...@@ -49,19 +49,18 @@ constexpr int32_t kConstantStackSpace = 8; ...@@ -49,19 +49,18 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize; kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t index) { inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(index)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset; return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
return MemOperand(fp, -kFirstStackSlotOffset - offset);
} }
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); } inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
...@@ -522,12 +521,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -522,12 +521,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, fp, offset, type); liftoff::Load(this, dst, fp, offset, type);
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
DCHECK_NE(dst_index, src_index); DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type); Fill(reg, src_offset, type);
Spill(dst_index, reg, type); Spill(dst_offset, reg, type);
} }
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
...@@ -541,17 +540,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -541,17 +540,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src); TurboAssembler::Move(dst, src);
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
sw(reg.gp(), dst); sw(reg.gp(), dst);
break; break;
case kWasmI64: case kWasmI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
case kWasmF32: case kWasmF32:
swc1(reg.fp(), dst); swc1(reg.fp(), dst);
...@@ -564,9 +563,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, ...@@ -564,9 +563,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
case kWasmI32: { case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg); LiftoffRegister tmp = GetUnusedRegister(kGpReg);
...@@ -582,8 +581,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -582,8 +581,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word)); TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word)); TurboAssembler::li(tmp.high_gp(), Operand(high_word));
sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
} }
default: default:
...@@ -593,16 +592,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -593,16 +592,16 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
MemOperand src = liftoff::GetStackSlot(index); MemOperand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
lw(reg.gp(), src); lw(reg.gp(), src);
break; break;
case kWasmI64: case kWasmI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord)); lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord)); lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break; break;
case kWasmF32: case kWasmF32:
lwc1(reg.fp(), src); lwc1(reg.fp(), src);
...@@ -615,28 +614,33 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, ...@@ -615,28 +614,33 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index, void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) { RegPairHalf half) {
lw(reg, liftoff::GetHalfStackSlot(index, half)); lw(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 12) { if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one // Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<=12 instructions total). // instruction per slot (<=12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
Sw(zero_reg, liftoff::GetStackSlot(index + offset)); Sw(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
} }
} else { } else {
// General case for bigger counts (12 instructions). // General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive). // Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0); Push(a1, a0);
Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot))); Addu(a0, fp,
Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize)); Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Addu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Label loop; Label loop;
bind(&loop); bind(&loop);
...@@ -1610,11 +1614,11 @@ void LiftoffStackSlots::Construct() { ...@@ -1610,11 +1614,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) { if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_); DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg, asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_index_, kHighWord)); liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
asm_->push(kScratchReg); asm_->push(kScratchReg);
} }
asm_->lw(kScratchReg, asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_)); liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->push(kScratchReg); asm_->push(kScratchReg);
break; break;
} }
......
...@@ -45,12 +45,12 @@ constexpr int32_t kConstantStackSpace = 16; ...@@ -45,12 +45,12 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize; kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t index) { inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(index)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); } inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
...@@ -437,12 +437,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -437,12 +437,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type); liftoff::Load(this, dst, src, type);
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
DCHECK_NE(dst_index, src_index); DCHECK_NE(dst_offset, src_offset);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_index, type); Fill(reg, src_offset, type);
Spill(dst_index, reg, type); Spill(dst_offset, reg, type);
} }
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) { void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
...@@ -457,10 +457,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -457,10 +457,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src); TurboAssembler::Move(dst, src);
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
Sw(reg.gp(), dst); Sw(reg.gp(), dst);
...@@ -479,9 +479,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, ...@@ -479,9 +479,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(index); MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
case kWasmI32: { case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg); LiftoffRegister tmp = GetUnusedRegister(kGpReg);
...@@ -502,9 +502,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -502,9 +502,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
MemOperand src = liftoff::GetStackSlot(index); MemOperand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
Lw(reg.gp(), src); Lw(reg.gp(), src);
...@@ -523,28 +523,32 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, ...@@ -523,28 +523,32 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
} }
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
if (count <= 12) { if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one // Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<= 12 instructions total). // instruction per slot (<= 12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
Sd(zero_reg, liftoff::GetStackSlot(index + offset)); Sd(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
} }
} else { } else {
// General case for bigger counts (12 instructions). // General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive). // Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0); Push(a1, a0);
Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot))); Daddu(a0, fp,
Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Daddu(a1, fp, Daddu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize)); Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Label loop; Label loop;
bind(&loop); bind(&loop);
...@@ -1399,7 +1403,7 @@ void LiftoffStackSlots::Construct() { ...@@ -1399,7 +1403,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_; const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) { switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: case LiftoffAssembler::VarState::kStack:
asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_)); asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg); asm_->push(kScratchReg);
break; break;
case LiftoffAssembler::VarState::kRegister: case LiftoffAssembler::VarState::kRegister:
......
...@@ -41,16 +41,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; ...@@ -41,16 +41,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize; kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset + return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
} }
} // namespace liftoff } // namespace liftoff
...@@ -119,7 +117,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -119,7 +117,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot"); bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue"); bailout(kUnsupportedArchitecture, "MoveStackValue");
} }
...@@ -133,21 +131,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -133,21 +131,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister"); bailout(kUnsupportedArchitecture, "Move DoubleRegister");
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register"); bailout(kUnsupportedArchitecture, "Spill register");
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value"); bailout(kUnsupportedArchitecture, "Spill value");
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "Fill"); bailout(kUnsupportedArchitecture, "Fill");
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half"); bailout(kUnsupportedArchitecture, "FillI64Half");
} }
...@@ -165,16 +163,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { ...@@ -165,16 +163,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two // Special straight-line code for up to five slots. Generates two
// instructions per slot. // instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); StoreP(r0, liftoff::GetHalfStackSlot(
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
} }
} else { } else {
// General case for bigger counts (9 instructions). // General case for bigger counts (9 instructions).
// Use r4 for start address (inclusive), r5 for end address (exclusive). // Use r4 for start address (inclusive), r5 for end address (exclusive).
push(r4); push(r4);
push(r5); push(r5);
subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); subi(r4, fp,
subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize)); Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
subi(r5, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop; Label loop;
bind(&loop); bind(&loop);
......
...@@ -40,16 +40,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; ...@@ -40,16 +40,14 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize; kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t index) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize; return kFirstStackSlotOffset + offset;
} }
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
int32_t offset = kFirstStackSlotOffset + return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
} }
} // namespace liftoff } // namespace liftoff
...@@ -118,7 +116,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -118,7 +116,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot"); bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue"); bailout(kUnsupportedArchitecture, "MoveStackValue");
} }
...@@ -132,21 +130,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -132,21 +130,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister"); bailout(kUnsupportedArchitecture, "Move DoubleRegister");
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register"); bailout(kUnsupportedArchitecture, "Spill register");
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value"); bailout(kUnsupportedArchitecture, "Spill value");
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
bailout(kUnsupportedArchitecture, "Fill"); bailout(kUnsupportedArchitecture, "Fill");
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half"); bailout(kUnsupportedArchitecture, "FillI64Half");
} }
...@@ -164,16 +162,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { ...@@ -164,16 +162,22 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two // Special straight-line code for up to five slots. Generates two
// instructions per slot. // instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord)); StoreP(r0, liftoff::GetHalfStackSlot(
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord)); GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
} }
} else { } else {
// General case for bigger counts (9 instructions). // General case for bigger counts (9 instructions).
// Use r3 for start address (inclusive), r4 for end address (exclusive). // Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3); push(r3);
push(r4); push(r4);
SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot))); SubP(r3, fp,
SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize)); Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
SubP(r4, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
Label loop; Label loop;
bind(&loop); bind(&loop);
......
...@@ -39,8 +39,7 @@ constexpr int32_t kConstantStackSpace = 16; ...@@ -39,8 +39,7 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset = constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize; kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t index) { inline Operand GetStackSlot(uint32_t offset) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
return Operand(rbp, -kFirstStackSlotOffset - offset); return Operand(rbp, -kFirstStackSlotOffset - offset);
} }
...@@ -339,11 +338,11 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, ...@@ -339,11 +338,11 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type); liftoff::Load(this, dst, src, type);
} }
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) { ValueType type) {
DCHECK_NE(dst_index, src_index); DCHECK_NE(dst_offset, src_offset);
Operand src = liftoff::GetStackSlot(src_index); Operand dst = liftoff::GetStackSlot(dst_offset);
Operand dst = liftoff::GetStackSlot(dst_index); Operand src = liftoff::GetStackSlot(src_offset);
if (ValueTypes::ElementSizeLog2Of(type) == 2) { if (ValueTypes::ElementSizeLog2Of(type) == 2) {
movl(kScratchRegister, src); movl(kScratchRegister, src);
movl(dst, kScratchRegister); movl(dst, kScratchRegister);
...@@ -375,10 +374,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -375,10 +374,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
ValueType type) { ValueType type) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(index); Operand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
movl(dst, reg.gp()); movl(dst, reg.gp());
...@@ -397,9 +396,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg, ...@@ -397,9 +396,9 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(index); RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(index); Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
case kWasmI32: case kWasmI32:
movl(dst, Immediate(value.to_i32())); movl(dst, Immediate(value.to_i32()));
...@@ -424,9 +423,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) { ...@@ -424,9 +423,9 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) { ValueType type) {
Operand src = liftoff::GetStackSlot(index); Operand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
movl(reg.gp(), src); movl(reg.gp(), src);
...@@ -445,20 +444,22 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index, ...@@ -445,20 +444,22 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
} }
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count); DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1; uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(last_stack_slot); RecordUsedSpillSlot(
LiftoffAssembler::GetStackOffsetFromIndex(last_stack_slot));
if (count <= 3) { if (count <= 3) {
// Special straight-line code for up to three slots // Special straight-line code for up to three slots
// (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>). // (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>).
for (uint32_t offset = 0; offset < count; ++offset) { for (uint32_t offset = 0; offset < count; ++offset) {
movq(liftoff::GetStackSlot(index + offset), Immediate(0)); movq(liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)),
Immediate(0));
} }
} else { } else {
// General case for bigger counts. // General case for bigger counts.
...@@ -468,7 +469,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) { ...@@ -468,7 +469,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
pushq(rax); pushq(rax);
pushq(rcx); pushq(rcx);
pushq(rdi); pushq(rdi);
leaq(rdi, liftoff::GetStackSlot(last_stack_slot)); leaq(rdi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
xorl(rax, rax); xorl(rax, rax);
movl(rcx, Immediate(count)); movl(rcx, Immediate(count));
repstosq(); repstosq();
...@@ -1663,14 +1664,14 @@ void LiftoffStackSlots::Construct() { ...@@ -1663,14 +1664,14 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmI32) { if (src.type() == kWasmI32) {
// Load i32 values to a register first to ensure they are zero // Load i32 values to a register first to ensure they are zero
// extended. // extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_index_)); asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister); asm_->pushq(kScratchRegister);
} else { } else {
// For all other types, just push the whole (8-byte) stack slot. // For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized // This is also ok for f32 values (even though we copy 4 uninitialized
// bytes), because f32 and f64 values are clearly distinguished in // bytes), because f32 and f64 values are clearly distinguished in
// Turbofan, so the uninitialized bytes are never accessed. // Turbofan, so the uninitialized bytes are never accessed.
asm_->pushq(liftoff::GetStackSlot(slot.src_index_)); asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
} }
break; break;
case LiftoffAssembler::VarState::kRegister: case LiftoffAssembler::VarState::kRegister:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment