Commit 960badd1 authored by Zhi An Ng, committed by Commit Bot

Revert "[liftoff] Use stack slot offsets instead of indices"

This reverts commit 20727725.

Reason for revert: Many bugs/crashes, https://crbug.com/v8/9999 https://crbug.com/1026500 https://crbug.com/1026514

Original change's description:
> [liftoff] Use stack slot offsets instead of indices
> 
> Spill/fill now take offsets instead of indices. We provide a
> helper, GetStackOffsetFromIndex, for callers. This is currently only
> useful while slot sizes are still fixed to 8 bytes.
> 
> StackTransferRecipe's RegisterLoad now works in terms of offset.
> 
> LiftoffStackSlots work in terms of offset as well.
> 
> TransferStackSlot currently still works in terms of indices, but can be
> converted to use offsets in a subsequent change.
> 
> Bug: v8:9909
> Change-Id: If54fb844309bdfd641720d063135dd59551813e0
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1922489
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Commit-Queue: Zhi An Ng <zhin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#65049}
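
For readers skimming the revert, the core of the original change was the unit that call sites pass around: a slot index (scaled by each platform's code) versus a pre-scaled byte offset. Below is a minimal sketch of the two schemes, using illustrative constants and free functions rather than V8's real declarations; slot size is fixed at 8 bytes during this migration, per the description above.

```cpp
#include <cstdint>

// Illustrative stand-ins; the real constants are per-architecture.
constexpr uint32_t kStackSlotSize = 8;
constexpr int32_t kFirstStackSlotOffset = 16;

// Offset-based scheme (the reverted change): callers pre-compute a byte
// offset once, via the GetStackOffsetFromIndex helper, and pass it around.
constexpr uint32_t GetStackOffsetFromIndex(uint32_t index) {
  return index * kStackSlotSize;
}
constexpr int32_t SlotFromOffset(uint32_t offset) {
  return kFirstStackSlotOffset + offset;
}

// Index-based scheme (restored by this revert): callers pass the raw slot
// index, and each platform scales it by kStackSlotSize itself.
constexpr int32_t SlotFromIndex(uint32_t index) {
  return kFirstStackSlotOffset + index * kStackSlotSize;
}

// Both name the same fp-relative slot, e.g. MemOperand(fp, -SlotFromIndex(i)).
static_assert(SlotFromIndex(3) == SlotFromOffset(GetStackOffsetFromIndex(3)),
              "index- and offset-based addressing agree while slots are 8 bytes");
```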

TBR=clemensb@chromium.org,zhin@chromium.org

Change-Id: I972b72346c87d1d55488911938e3f3cdbe69abe5
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9909
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1925560
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65080}
parent 6b11b700
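
Most hunks below are mechanical call-site rewrites of Spill/Fill/GetStackSlot between the two parameter conventions. The one structural piece is StackTransferRecipe's RegisterLoad, whose single value field changes meaning between the schemes. A hedged sketch of that record follows, with names taken from the diff but simplified (the real struct also carries a ValueType and lives in the StackTransferRecipe hunks below):

```cpp
#include <cstdint>

enum LoadKind : uint8_t { kConstant, kStack, kLowHalfStack, kHighHalfStack };
enum RegPairHalf : uint8_t { kLowWord, kHighWord };

struct RegisterLoad {
  LoadKind kind;
  // After this revert: an i32 constant or a stack *index*, depending on kind.
  // In the reverted change, the stack cases held a byte *offset* instead.
  int32_t value;

  static RegisterLoad Stack(int32_t stack_index) {
    return {kStack, stack_index};
  }
  static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) {
    return {half == kLowWord ? kLowHalfStack : kHighHalfStack, stack_index};
  }
};
```
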
@@ -46,18 +46,20 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
}
inline MemOperand GetInstanceOperand() {
@@ -537,12 +539,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
}
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
DCHECK_NE(dst_index, src_index);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
@@ -562,17 +564,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
str(reg.gp(), dst);
break;
case kWasmI64:
str(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
str(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
str(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
vstr(liftoff::GetFloatRegister(reg.fp()), dst);
@@ -585,9 +587,9 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
UseScratchRegisterScope temps(this);
Register src = no_reg;
// The scratch register will be required by str if multiple instructions
@@ -605,10 +607,10 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
case kWasmI64: {
int32_t low_word = value.to_i64();
mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
str(src, liftoff::GetHalfStackSlot(index, kLowWord));
int32_t high_word = value.to_i64() >> 32;
mov(src, Operand(high_word));
str(src, liftoff::GetHalfStackSlot(offset, kHighWord));
str(src, liftoff::GetHalfStackSlot(index, kHighWord));
break;
}
default:
@@ -617,36 +619,36 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
switch (type) {
case kWasmI32:
ldr(reg.gp(), liftoff::GetStackSlot(offset));
ldr(reg.gp(), liftoff::GetStackSlot(index));
break;
case kWasmI64:
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
ldr(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
ldr(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(offset));
vldr(liftoff::GetFloatRegister(reg.fp()), liftoff::GetStackSlot(index));
break;
case kWasmF64:
vldr(reg.fp(), liftoff::GetStackSlot(offset));
vldr(reg.fp(), liftoff::GetStackSlot(index));
break;
default:
UNREACHABLE();
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
RegPairHalf half) {
ldr(reg, liftoff::GetHalfStackSlot(offset, half));
ldr(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
// We need a zero reg. Always use r0 for that, and push it before to restore
// its value afterwards.
@@ -657,22 +659,16 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kLowWord));
str(r0, liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord));
str(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
str(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r1 for start address (inclusive), r2 for end address (exclusive).
push(r1);
push(r2);
sub(r1, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
sub(r2, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
sub(r1, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
sub(r2, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
Label loop;
bind(&loop);
@@ -1638,13 +1634,13 @@ void LiftoffStackSlots::Construct() {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->Push(scratch);
} break;
case kWasmF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_index_));
asm_->vpush(scratch);
} break;
default:
......
@@ -43,12 +43,12 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() {
@@ -327,12 +327,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
UseScratchRegisterScope temps(this);
CPURegister scratch = liftoff::AcquireByType(&temps, type);
Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_offset));
Ldr(scratch, liftoff::GetStackSlot(src_index));
Str(scratch, liftoff::GetStackSlot(dst_index));
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
@@ -354,16 +354,16 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
Str(liftoff::GetRegFromType(reg, type), dst);
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg();
switch (value.type()) {
@@ -390,41 +390,37 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
Str(src, dst);
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
MemOperand src = liftoff::GetStackSlot(index);
Ldr(liftoff::GetRegFromType(reg, type), src);
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
int max_stp_offset =
-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index + count - 1));
int max_stp_offset = -liftoff::GetStackSlotOffset(index + count - 1);
if (count <= 12 && IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per two slots (<= 6 instructions total).
for (; count > 1; count -= 2) {
STATIC_ASSERT(kStackSlotSize == kSystemPointerSize);
stp(xzr, xzr,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + count - 1)));
stp(xzr, xzr, liftoff::GetStackSlot(index + count - 1));
}
DCHECK(count == 0 || count == 1);
if (count) {
str(xzr, liftoff::GetStackSlot(GetStackOffsetFromIndex(index)));
}
if (count) str(xzr, liftoff::GetStackSlot(index));
} else {
// General case for bigger counts (5-8 instructions).
UseScratchRegisterScope temps(this);
Register address_reg = temps.AcquireX();
// This {Sub} might use another temp register if the offset is too large.
Sub(address_reg, fp, GetStackOffsetFromIndex(last_stack_slot));
Sub(address_reg, fp, liftoff::GetStackSlotOffset(last_stack_slot));
Register count_reg = temps.AcquireX();
Mov(count_reg, count);
@@ -1108,7 +1104,7 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_index_));
asm_->Poke(scratch, poke_offset);
break;
}
......
@@ -26,14 +26,16 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) {
inline Operand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return Operand(ebp, -kFirstStackSlotOffset - offset + half_offset);
int32_t offset = index * LiftoffAssembler::kStackSlotSize - half_offset;
return Operand(ebp, -kFirstStackSlotOffset - offset);
}
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
@@ -404,18 +406,18 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
if (needs_reg_pair(type)) {
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_offset, kLowWord));
liftoff::GetHalfStackSlot(src_index, kLowWord),
liftoff::GetHalfStackSlot(dst_index, kLowWord));
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_offset, kHighWord),
liftoff::GetHalfStackSlot(dst_offset, kHighWord));
liftoff::GetHalfStackSlot(src_index, kHighWord),
liftoff::GetHalfStackSlot(dst_index, kHighWord));
} else {
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_offset),
liftoff::GetStackSlot(dst_offset));
liftoff::MoveStackValue(this, liftoff::GetStackSlot(src_index),
liftoff::GetStackSlot(dst_index));
}
}
@@ -436,17 +438,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
mov(dst, reg.gp());
break;
case kWasmI64:
mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
mov(liftoff::GetHalfStackSlot(index, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(index, kHighWord), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
@@ -459,9 +461,9 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
mov(dst, Immediate(value.to_i32()));
@@ -469,8 +471,8 @@
case kWasmI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
mov(liftoff::GetHalfStackSlot(offset, kHighWord), Immediate(high_word));
mov(liftoff::GetHalfStackSlot(index, kLowWord), Immediate(low_word));
mov(liftoff::GetHalfStackSlot(index, kHighWord), Immediate(high_word));
break;
}
default:
@@ -479,16 +481,16 @@
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
Operand src = liftoff::GetStackSlot(offset);
Operand src = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
mov(reg.gp(), src);
break;
case kWasmI64:
mov(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
mov(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
mov(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
movss(reg.fp(), src);
@@ -501,26 +503,22 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
RegPairHalf half) {
mov(reg, liftoff::GetHalfStackSlot(offset, half));
mov(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
if (count <= 2) {
// Special straight-line code for up to two slots (6-9 bytes per word:
// C7 <1-4 bytes operand> <4 bytes imm>, makes 12-18 bytes per slot).
for (uint32_t offset = 0; offset < count; ++offset) {
mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kLowWord),
Immediate(0));
mov(liftoff::GetHalfStackSlot(GetStackOffsetFromIndex(index + offset),
kHighWord),
Immediate(0));
mov(liftoff::GetHalfStackSlot(index + offset, kLowWord), Immediate(0));
mov(liftoff::GetHalfStackSlot(index + offset, kHighWord), Immediate(0));
}
} else {
// General case for bigger counts.
@@ -530,7 +528,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
push(eax);
push(ecx);
push(edi);
lea(edi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
lea(edi, liftoff::GetStackSlot(last_stack_slot));
xor_(eax, eax);
// Number of words is number of slots times two.
mov(ecx, Immediate(count * 2));
@@ -1933,9 +1931,9 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
}
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->push(liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
if (src.type() == kWasmI64) {
......
@@ -42,7 +42,7 @@ class StackTransferRecipe {
LoadKind kind;
ValueType type;
int32_t value; // i32 constant value or stack offset, depending on kind.
int32_t value; // i32 constant value or stack index, depending on kind.
// Named constructors.
static RegisterLoad Const(WasmValue constant) {
@@ -53,12 +53,12 @@ class StackTransferRecipe {
DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
return {kConstant, kWasmI64, constant.to_i32_unchecked()};
}
static RegisterLoad Stack(int32_t offset, ValueType type) {
return {kStack, type, offset};
static RegisterLoad Stack(int32_t stack_index, ValueType type) {
return {kStack, type, stack_index};
}
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
static RegisterLoad HalfStack(int32_t stack_index, RegPairHalf half) {
return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
offset};
stack_index};
}
private:
@@ -91,23 +91,18 @@ class StackTransferRecipe {
switch (src.loc()) {
case VarState::kStack:
if (src_index == dst_index) break;
asm_->MoveStackValue(asm_->GetStackOffsetFromIndex(dst_index),
asm_->GetStackOffsetFromIndex(src_index),
src.type());
asm_->MoveStackValue(dst_index, src_index, src.type());
break;
case VarState::kRegister:
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index), src.reg(),
src.type());
asm_->Spill(dst_index, src.reg(), src.type());
break;
case VarState::kIntConst:
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index),
src.constant());
asm_->Spill(dst_index, src.constant());
break;
}
break;
case VarState::kRegister:
LoadIntoRegister(dst.reg(), src,
asm_->GetStackOffsetFromIndex(src_index));
LoadIntoRegister(dst.reg(), src, src_index);
break;
case VarState::kIntConst:
DCHECK_EQ(dst, src);
@@ -117,10 +112,10 @@ class StackTransferRecipe {
void LoadIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t src_offset) {
uint32_t src_index) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_offset, src.type());
LoadStackSlot(dst, src_index, src.type());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
@@ -134,14 +129,14 @@ class StackTransferRecipe {
void LoadI64HalfIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src,
uint32_t offset, RegPairHalf half) {
uint32_t index, RegPairHalf half) {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) {
case VarState::kStack:
LoadI64HalfStackSlot(dst, offset, half);
LoadI64HalfStackSlot(dst, index, half);
break;
case VarState::kRegister: {
LiftoffRegister src_half =
@@ -199,7 +194,7 @@ class StackTransferRecipe {
}
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
ValueType type) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
@@ -211,15 +206,15 @@ class StackTransferRecipe {
if (dst.is_pair()) {
DCHECK_EQ(kWasmI64, type);
*register_load(dst.low()) =
RegisterLoad::HalfStack(stack_offset, kLowWord);
RegisterLoad::HalfStack(stack_index, kLowWord);
*register_load(dst.high()) =
RegisterLoad::HalfStack(stack_offset, kHighWord);
RegisterLoad::HalfStack(stack_index, kHighWord);
} else {
*register_load(dst) = RegisterLoad::Stack(stack_offset, type);
*register_load(dst) = RegisterLoad::Stack(stack_index, type);
}
}
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t offset,
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t stack_index,
RegPairHalf half) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
@@ -228,7 +223,7 @@ class StackTransferRecipe {
return;
}
load_dst_regs_.set(dst);
*register_load(dst) = RegisterLoad::HalfStack(offset, half);
*register_load(dst) = RegisterLoad::HalfStack(stack_index, half);
}
private:
@@ -299,11 +294,9 @@ class StackTransferRecipe {
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
LiftoffRegister spill_reg = move->src;
asm_->Spill(LiftoffAssembler::GetStackOffsetFromIndex(next_spill_slot),
spill_reg, move->type);
asm_->Spill(next_spill_slot, spill_reg, move->type);
// Remember to reload into the destination register later.
LoadStackSlot(dst, asm_->GetStackOffsetFromIndex(next_spill_slot),
move->type);
LoadStackSlot(dst, next_spill_slot, move->type);
++next_spill_slot;
ClearExecutedMove(dst);
}
@@ -504,8 +497,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
case VarState::kStack: {
LiftoffRegister reg =
GetUnusedRegister(reg_class_for(slot.type()), pinned);
Fill(reg, GetStackOffsetFromIndex(cache_state_.stack_height()),
slot.type());
Fill(reg, cache_state_.stack_height(), slot.type());
return reg;
}
case VarState::kRegister:
@@ -562,11 +554,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack:
return;
case VarState::kRegister:
Spill(GetStackOffsetFromIndex(index), slot.reg(), slot.type());
Spill(index, slot.reg(), slot.type());
cache_state_.dec_used(slot.reg());
break;
case VarState::kIntConst:
Spill(GetStackOffsetFromIndex(index), slot.constant());
Spill(index, slot.constant());
break;
}
slot.MakeStack();
@@ -582,7 +574,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
Spill(GetStackOffsetFromIndex(i), slot.reg(), slot.type());
Spill(i, slot.reg(), slot.type());
slot.MakeStack();
}
cache_state_.reset_used_registers();
@@ -602,7 +594,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue;
Spill(GetStackOffsetFromIndex(idx), slot.reg(), slot.type());
Spill(idx, slot.reg(), slot.type());
slot.MakeStack();
}
@@ -636,7 +628,6 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
const bool is_pair = kNeedI64RegPair && type == kWasmI64;
const int num_lowered_params = is_pair ? 2 : 1;
const uint32_t stack_idx = param_base + param;
const uint32_t stack_offset = GetStackOffsetFromIndex(stack_idx);
const VarState& slot = cache_state_.stack_state[stack_idx];
// Process both halves of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack.
@@ -663,14 +654,13 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
#endif
param_regs.set(reg);
if (is_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_offset,
half);
stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
} else {
stack_transfers.LoadIntoRegister(reg, slot, stack_offset);
stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
}
} else {
DCHECK(loc.IsCallerFrameSlot());
stack_slots.Add(slot, stack_offset, half);
stack_slots.Add(slot, stack_idx, half);
}
}
}
@@ -775,9 +765,8 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
: reg_class_for(return_type) == kGpReg
? LiftoffRegister(kGpReturnRegisters[0])
: LiftoffRegister(kFpReturnRegisters[0]);
stack_transfers.LoadIntoRegister(
return_reg, cache_state_.stack_state.back(),
GetStackOffsetFromIndex(cache_state_.stack_height() - 1));
stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
cache_state_.stack_height() - 1);
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -831,7 +820,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high());
}
Spill(GetStackOffsetFromIndex(idx), slot->reg(), slot->type());
Spill(idx, slot->reg(), slot->type());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
......
@@ -38,16 +38,6 @@ class LiftoffAssembler : public TurboAssembler {
static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
// TODO(zhin): Temporary while migrating away from fixed slot sizes.
inline static constexpr uint32_t SlotSizeForType(ValueType type) {
return kStackSlotSize;
}
// TODO(zhin): Temporary for migration from index to offset.
inline static uint32_t GetStackOffsetFromIndex(uint32_t index) {
return index * LiftoffAssembler::kStackSlotSize;
}
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
@@ -113,18 +103,6 @@ class LiftoffAssembler : public TurboAssembler {
void MakeStack() { loc_ = kStack; }
// Copy src to this, except for offset, since src and this could have been
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
type_ = src.type();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
i32_const_ = src.i32_const();
}
}
private:
Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each
@@ -281,29 +259,29 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
uint32_t NextSpillOffset(ValueType type) {
uint32_t NextSpillOffset() {
if (cache_state_.stack_state.empty()) {
return SlotSizeForType(type);
return 0;
}
VarState last = cache_state_.stack_state.back();
uint32_t offset = last.offset() + SlotSizeForType(type);
uint32_t offset =
last.offset() + ValueTypes::ElementSizeInBytes(last.type());
return offset;
}
void PushRegister(ValueType type, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class());
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset());
}
void PushConstant(ValueType type, int32_t i32_const) {
DCHECK(type == kWasmI32 || type == kWasmI64);
cache_state_.stack_state.emplace_back(type, i32_const,
NextSpillOffset(type));
cache_state_.stack_state.emplace_back(type, i32_const, NextSpillOffset());
}
void PushStack(ValueType type) {
cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
cache_state_.stack_state.emplace_back(type, NextSpillOffset());
}
void SpillRegister(LiftoffRegister);
@@ -355,9 +333,7 @@ class LiftoffAssembler : public TurboAssembler {
// Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillSlot(uint32_t offset) {
// TODO(zhin): Temporary for migration from index to offset.
uint32_t index = offset / kStackSlotSize;
void RecordUsedSpillSlot(uint32_t index) {
if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
}
@@ -425,18 +401,17 @@ class LiftoffAssembler : public TurboAssembler {
bool is_store_mem = false);
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
ValueType);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
inline void Move(Register dst, Register src, ValueType);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
inline void Spill(uint32_t offset, LiftoffRegister, ValueType);
inline void Spill(uint32_t offset, WasmValue);
inline void Fill(LiftoffRegister, uint32_t offset, ValueType);
inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t offset, RegPairHalf);
inline void FillI64Half(Register, uint32_t index, RegPairHalf);
inline void FillStackSlotsWithZero(uint32_t index, uint32_t count);
// i32 binops.
@@ -835,9 +810,9 @@ class LiftoffStackSlots {
public:
explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
void Add(const LiftoffAssembler::VarState& src, uint32_t src_offset,
void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half) {
slots_.emplace_back(src, src_offset, half);
slots_.emplace_back(src, src_index, half);
}
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
@@ -847,14 +822,14 @@ class LiftoffStackSlots {
struct Slot {
// Allow move construction.
Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
RegPairHalf half)
: src_(src), src_offset_(src_offset), half_(half) {}
: src_(src), src_index_(src_index), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), half_(kLowWord) {}
const LiftoffAssembler::VarState src_;
uint32_t src_offset_ = 0;
uint32_t src_index_ = 0;
RegPairHalf half_;
};
......
@@ -1292,7 +1292,7 @@ class LiftoffCompiler {
case kStack: {
auto rc = reg_class_for(imm.type);
LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, __ GetStackOffsetFromIndex(imm.index), imm.type);
__ Fill(reg, imm.index, imm.type);
__ PushRegister(slot.type(), reg);
break;
}
@@ -1306,8 +1307,7 @@
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(),
__ GetStackOffsetFromIndex(state.stack_height() - 1), type);
__ Fill(dst_slot->reg(), state.stack_height() - 1, type);
return;
}
state.dec_used(slot_reg);
@@ -1316,9 +1315,7 @@
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg,
__ GetStackOffsetFromIndex(__ cache_state()->stack_height() - 1),
type);
__ Fill(dst_reg, __ cache_state()->stack_height() - 1, type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
@@ -1330,12 +1327,12 @@
switch (source_slot.loc()) {
case kRegister:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot.Copy(source_slot);
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg());
break;
case kIntConst:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot.Copy(source_slot);
target_slot = source_slot;
break;
case kStack:
LocalSetFromStackSlot(&target_slot, local_index);
......
@@ -49,18 +49,19 @@ constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset;
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
@@ -521,12 +522,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, fp, offset, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
DCHECK_NE(dst_index, src_index);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
@@ -540,17 +541,17 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
sw(reg.gp(), dst);
break;
case kWasmI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
swc1(reg.fp(), dst);
@@ -563,9 +564,9 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
@@ -581,8 +582,8 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word));
sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
}
default:
@@ -592,16 +593,16 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
MemOperand src = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
lw(reg.gp(), src);
break;
case kWasmI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
lwc1(reg.fp(), src);
@@ -614,33 +615,28 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
}
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
RegPairHalf half) {
lw(reg, liftoff::GetHalfStackSlot(offset, half));
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<=12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) {
Sw(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
Sw(zero_reg, liftoff::GetStackSlot(index + offset));
}
} else {
// General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0);
Addu(a0, fp,
Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Addu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
Label loop;
bind(&loop);
@@ -1614,11 +1610,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
asm_->push(kScratchReg);
}
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->push(kScratchReg);
break;
}
......
@@ -45,12 +45,12 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetStackSlot(uint32_t offset) {
return MemOperand(fp, -GetStackSlotOffset(offset));
inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -GetStackSlotOffset(index));
}
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
@@ -437,12 +437,12 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
DCHECK_NE(dst_index, src_index);
LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
Fill(reg, src_index, type);
Spill(dst_index, reg, type);
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
@@ -457,10 +457,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
Sw(reg.gp(), dst);
@@ -479,9 +479,9 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
MemOperand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg);
@@ -502,9 +502,9 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
MemOperand src = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
Lw(reg.gp(), src);
@@ -523,32 +523,28 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
}
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
if (count <= 12) {
// Special straight-line code for up to 12 slots. Generates one
// instruction per slot (<= 12 instructions total).
for (uint32_t offset = 0; offset < count; ++offset) {
Sd(zero_reg,
liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)));
Sd(zero_reg, liftoff::GetStackSlot(index + offset));
}
} else {
// General case for bigger counts (12 instructions).
// Use a0 for start address (inclusive), a1 for end address (exclusive).
Push(a1, a0);
Daddu(a0, fp,
Operand(-liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(last_stack_slot)));
Daddu(a1, fp,
Operand(-liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) +
kStackSlotSize));
Operand(-liftoff::GetStackSlotOffset(index) + kStackSlotSize));
Label loop;
bind(&loop);
@@ -1403,7 +1399,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_));
asm_->push(kScratchReg);
break;
case LiftoffAssembler::VarState::kRegister:
......
@@ -41,14 +41,16 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
}
} // namespace liftoff
@@ -117,7 +119,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
@@ -131,21 +133,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
@@ -163,22 +165,16 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r4 for start address (inclusive), r5 for end address (exclusive).
push(r4);
push(r5);
subi(r4, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
subi(r5, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
subi(r4, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
subi(r5, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
Label loop;
bind(&loop);
......
@@ -40,14 +40,16 @@ constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset;
inline int GetStackSlotOffset(uint32_t index) {
return kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset);
int32_t offset = kFirstStackSlotOffset +
index * LiftoffAssembler::kStackSlotSize - half_offset;
return MemOperand(fp, -offset);
}
} // namespace liftoff
@@ -116,7 +118,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
@@ -130,21 +132,21 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register");
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill");
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half");
}
@@ -162,22 +164,16 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
// Special straight-line code for up to five slots. Generates two
// instructions per slot.
for (uint32_t offset = 0; offset < count; ++offset) {
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(
GetStackOffsetFromIndex(index + offset), kHighWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kLowWord));
StoreP(r0, liftoff::GetHalfStackSlot(index + offset, kHighWord));
}
} else {
// General case for bigger counts (9 instructions).
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
SubP(r3, fp,
Operand(liftoff::GetStackSlotOffset(
GetStackOffsetFromIndex(last_stack_slot))));
SubP(r4, fp,
Operand(liftoff::GetStackSlotOffset(GetStackOffsetFromIndex(index)) -
kStackSlotSize));
SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(last_stack_slot)));
SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(index) - kStackSlotSize));
Label loop;
bind(&loop);
......
@@ -39,7 +39,8 @@ constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) {
inline Operand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
return Operand(rbp, -kFirstStackSlotOffset - offset);
}
@@ -338,11 +339,11 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
DCHECK_NE(dst_index, src_index);
Operand src = liftoff::GetStackSlot(src_index);
Operand dst = liftoff::GetStackSlot(dst_index);
if (ValueTypes::ElementSizeLog2Of(type) == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
@@ -374,10 +375,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
}
}
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
ValueType type) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
movl(dst, reg.gp());
@@ -396,9 +397,9 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
}
}
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
RecordUsedSpillSlot(offset);
Operand dst = liftoff::GetStackSlot(offset);
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
RecordUsedSpillSlot(index);
Operand dst = liftoff::GetStackSlot(index);
switch (value.type()) {
case kWasmI32:
movl(dst, Immediate(value.to_i32()));
@@ -423,9 +424,9 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
}
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
ValueType type) {
Operand src = liftoff::GetStackSlot(offset);
Operand src = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
movl(reg.gp(), src);
@@ -444,22 +445,20 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
}
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
DCHECK_LT(0, count);
uint32_t last_stack_slot = index + count - 1;
RecordUsedSpillSlot(
LiftoffAssembler::GetStackOffsetFromIndex(last_stack_slot));
RecordUsedSpillSlot(last_stack_slot);
if (count <= 3) {
// Special straight-line code for up to three slots
// (7-10 bytes per slot: REX C7 <1-4 bytes op> <4 bytes imm>).
for (uint32_t offset = 0; offset < count; ++offset) {
movq(liftoff::GetStackSlot(GetStackOffsetFromIndex(index + offset)),
Immediate(0));
movq(liftoff::GetStackSlot(index + offset), Immediate(0));
}
} else {
// General case for bigger counts.
@@ -469,7 +468,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t index, uint32_t count) {
pushq(rax);
pushq(rcx);
pushq(rdi);
leaq(rdi, liftoff::GetStackSlot(GetStackOffsetFromIndex(last_stack_slot)));
leaq(rdi, liftoff::GetStackSlot(last_stack_slot));
xorl(rax, rax);
movl(rcx, Immediate(count));
repstosq();
@@ -1664,14 +1663,14 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmI32) {
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_index_));
asm_->pushq(kScratchRegister);
} else {
// For all other types, just push the whole (8-byte) stack slot.
// This is also ok for f32 values (even though we copy 4 uninitialized
// bytes), because f32 and f64 values are clearly distinguished in
// Turbofan, so the uninitialized bytes are never accessed.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(liftoff::GetStackSlot(slot.src_index_));
}
break;
case LiftoffAssembler::VarState::kRegister:
......