Commit 8e2cfc89 authored by Clemens Backes, committed by Commit Bot

[Liftoff] Use {int} for stack offsets

To follow the style guide, and potentially improve performance, we
switch to {int} for all stack offsets. The size of the stack cannot grow
anywhere near {kMaxInt} anyway, and we have limits in place (for number
of locals, number of parameters, and function body size) which also
prevent this number from ever growing near that limit.

R=jkummerow@chromium.org

Change-Id: I05ca9124d2def79edd212464739bc12315f7b813
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1997445
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65740}
parent e6f14760
...@@ -37,22 +37,20 @@ namespace liftoff { ...@@ -37,22 +37,20 @@ namespace liftoff {
// //
static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize, static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
"Slot size should be twice the size of the 32 bit pointer."); "Slot size should be twice the size of the 32 bit pointer.");
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kConstantStackSpace = kSystemPointerSize; constexpr int kConstantStackSpace = kSystemPointerSize;
// kPatchInstructionsRequired sets a maximum limit of how many instructions that // kPatchInstructionsRequired sets a maximum limit of how many instructions that
// PatchPrepareStackFrame will use in order to increase the stack appropriately. // PatchPrepareStackFrame will use in order to increase the stack appropriately.
// Three instructions are required to sub a large constant, movw + movt + sub. // Three instructions are required to sub a large constant, movw + movt + sub.
constexpr int32_t kPatchInstructionsRequired = 3; constexpr int32_t kPatchInstructionsRequired = 3;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
return kInstanceOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(int offset) {
return MemOperand(fp, -GetStackSlotOffset(offset)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kInstanceOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
...@@ -242,9 +240,9 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -242,9 +240,9 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
// Allocate space for instance plus what is needed for the frame slots. // Allocate space for instance plus what is needed for the frame slots.
uint32_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
#ifdef USE_SIMULATOR #ifdef USE_SIMULATOR
// When using the simulator, deal with Liftoff which allocates the stack // When using the simulator, deal with Liftoff which allocates the stack
// before checking it. // before checking it.
...@@ -288,7 +286,7 @@ void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); } ...@@ -288,7 +286,7 @@ void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); } void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type) { switch (type) {
case kWasmS128: case kWasmS128:
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
...@@ -585,8 +583,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -585,8 +583,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
...@@ -608,7 +605,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ...@@ -608,7 +605,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
...@@ -640,8 +637,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -640,8 +637,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
ldr(reg.gp(), liftoff::GetStackSlot(offset)); ldr(reg.gp(), liftoff::GetStackSlot(offset));
...@@ -661,12 +657,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ...@@ -661,12 +657,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset, void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
RegPairHalf half) {
ldr(reg, liftoff::GetHalfStackSlot(offset, half)); ldr(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4); DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
...@@ -679,7 +674,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { ...@@ -679,7 +674,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
if (size <= 36) { if (size <= 36) {
// Special straight-line code for up to 9 words. Generates one // Special straight-line code for up to 9 words. Generates one
// instruction per word. // instruction per word.
for (uint32_t offset = 4; offset <= size; offset += 4) { for (int offset = 4; offset <= size; offset += 4) {
str(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord)); str(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
} }
} else { } else {
......
...@@ -39,14 +39,12 @@ namespace liftoff { ...@@ -39,14 +39,12 @@ namespace liftoff {
// -----+--------------------+ <-- stack ptr (sp) // -----+--------------------+ <-- stack ptr (sp)
// //
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0; constexpr int kConstantStackSpace = 0;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
return kInstanceOffset + offset;
}
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(int offset) {
return MemOperand(fp, -GetStackSlotOffset(offset)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
...@@ -120,10 +118,10 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -120,10 +118,10 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
static_assert(kStackSlotSize == kXRegSize, static_assert(kStackSlotSize == kXRegSize,
"kStackSlotSize must equal kXRegSize"); "kStackSlotSize must equal kXRegSize");
uint32_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
// The stack pointer is required to be quadword aligned. // The stack pointer is required to be quadword aligned.
// Misalignment will cause a stack alignment fault. // Misalignment will cause a stack alignment fault.
bytes = RoundUp(bytes, kQuadWordSizeInBytes); bytes = RoundUp(bytes, kQuadWordSizeInBytes);
...@@ -176,7 +174,7 @@ void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); } ...@@ -176,7 +174,7 @@ void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); } void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
// TODO(zhin): Unaligned access typically take additional cycles, we should do // TODO(zhin): Unaligned access typically take additional cycles, we should do
// some performance testing to see how big an effect it will take. // some performance testing to see how big an effect it will take.
switch (type) { switch (type) {
...@@ -376,14 +374,13 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -376,14 +374,13 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
Str(liftoff::GetRegFromType(reg, type), dst); Str(liftoff::GetRegFromType(reg, type), dst);
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
...@@ -412,17 +409,16 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -412,17 +409,16 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
Str(src, dst); Str(src, dst);
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset); MemOperand src = liftoff::GetStackSlot(offset);
Ldr(liftoff::GetRegFromType(reg, type), src); Ldr(liftoff::GetRegFromType(reg, type), src);
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4); DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
......
...@@ -22,13 +22,13 @@ namespace liftoff { ...@@ -22,13 +22,13 @@ namespace liftoff {
// ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack // ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack
// slot is located at ebp-8-offset. // slot is located at ebp-8-offset.
constexpr int32_t kConstantStackSpace = 8; constexpr int kConstantStackSpace = 8;
inline Operand GetStackSlot(uint32_t offset) { inline Operand GetStackSlot(int offset) {
return Operand(ebp, -kConstantStackSpace - offset); return Operand(ebp, -kConstantStackSpace - offset);
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return Operand(ebp, -kConstantStackSpace - offset + half_offset); return Operand(ebp, -kConstantStackSpace - offset + half_offset);
...@@ -153,9 +153,8 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -153,9 +153,8 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
uint32_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
DCHECK_EQ(bytes % kSystemPointerSize, 0); DCHECK_EQ(bytes % kSystemPointerSize, 0);
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer. // assembler to try to grow the buffer.
...@@ -194,7 +193,7 @@ void LiftoffAssembler::FinishCode() {} ...@@ -194,7 +193,7 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {} void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
} }
...@@ -443,8 +442,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -443,8 +442,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset); Operand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
...@@ -466,7 +464,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ...@@ -466,7 +464,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset); Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
...@@ -486,8 +484,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -486,8 +484,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
Operand src = liftoff::GetStackSlot(offset); Operand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
...@@ -508,12 +505,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ...@@ -508,12 +505,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset, void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
RegPairHalf half) {
mov(reg, liftoff::GetHalfStackSlot(offset, half)); mov(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4); DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
...@@ -521,7 +517,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { ...@@ -521,7 +517,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
if (size <= 12) { if (size <= 12) {
// Special straight-line code for up to three words (6-9 bytes per word: // Special straight-line code for up to three words (6-9 bytes per word:
// C7 <1-4 bytes operand> <4 bytes imm>, makes 18-27 bytes total). // C7 <1-4 bytes operand> <4 bytes imm>, makes 18-27 bytes total).
for (uint32_t offset = 4; offset <= size; offset += 4) { for (int offset = 4; offset <= size; offset += 4) {
mov(liftoff::GetHalfStackSlot(start + offset, kLowWord), Immediate(0)); mov(liftoff::GetHalfStackSlot(start + offset, kLowWord), Immediate(0));
} }
} else { } else {
......
...@@ -124,7 +124,7 @@ class StackTransferRecipe { ...@@ -124,7 +124,7 @@ class StackTransferRecipe {
void LoadI64HalfIntoRegister(LiftoffRegister dst, void LoadI64HalfIntoRegister(LiftoffRegister dst,
const LiftoffAssembler::VarState& src, const LiftoffAssembler::VarState& src,
uint32_t offset, RegPairHalf half) { int offset, RegPairHalf half) {
// Use CHECK such that the remaining code is statically dead if // Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false. // {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair); CHECK(kNeedI64RegPair);
...@@ -209,8 +209,7 @@ class StackTransferRecipe { ...@@ -209,8 +209,7 @@ class StackTransferRecipe {
} }
} }
void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t offset, void LoadI64HalfStackSlot(LiftoffRegister dst, int offset, RegPairHalf half) {
RegPairHalf half) {
if (load_dst_regs_.has(dst)) { if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack // It can happen that we spilled the same register to different stack
// slots, and then we reload them later into the same dst register. // slots, and then we reload them later into the same dst register.
......
...@@ -33,7 +33,7 @@ namespace wasm { ...@@ -33,7 +33,7 @@ namespace wasm {
class LiftoffAssembler : public TurboAssembler { class LiftoffAssembler : public TurboAssembler {
public: public:
// Each slot in our stack frame currently has exactly 8 bytes. // Each slot in our stack frame currently has exactly 8 bytes.
static constexpr uint32_t kStackSlotSize = 8; static constexpr int kStackSlotSize = 8;
static constexpr ValueType kWasmIntPtr = static constexpr ValueType kWasmIntPtr =
kSystemPointerSize == 8 ? kWasmI64 : kWasmI32; kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
...@@ -42,13 +42,13 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -42,13 +42,13 @@ class LiftoffAssembler : public TurboAssembler {
public: public:
enum Location : uint8_t { kStack, kRegister, kIntConst }; enum Location : uint8_t { kStack, kRegister, kIntConst };
explicit VarState(ValueType type, uint32_t offset) explicit VarState(ValueType type, int offset)
: loc_(kStack), type_(type), spill_offset_(offset) {} : loc_(kStack), type_(type), spill_offset_(offset) {}
explicit VarState(ValueType type, LiftoffRegister r, uint32_t offset) explicit VarState(ValueType type, LiftoffRegister r, int offset)
: loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) { : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
DCHECK_EQ(r.reg_class(), reg_class_for(type)); DCHECK_EQ(r.reg_class(), reg_class_for(type));
} }
explicit VarState(ValueType type, int32_t i32_const, uint32_t offset) explicit VarState(ValueType type, int32_t i32_const, int offset)
: loc_(kIntConst), : loc_(kIntConst),
type_(type), type_(type),
i32_const_(i32_const), i32_const_(i32_const),
...@@ -91,7 +91,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -91,7 +91,7 @@ class LiftoffAssembler : public TurboAssembler {
: WasmValue(int64_t{i32_const_}); : WasmValue(int64_t{i32_const_});
} }
uint32_t offset() const { return spill_offset_; } int offset() const { return spill_offset_; }
Register gp_reg() const { return reg().gp(); } Register gp_reg() const { return reg().gp(); }
DoubleRegister fp_reg() const { return reg().fp(); } DoubleRegister fp_reg() const { return reg().fp(); }
...@@ -125,7 +125,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -125,7 +125,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister reg_; // used if loc_ == kRegister LiftoffRegister reg_; // used if loc_ == kRegister
int32_t i32_const_; // used if loc_ == kIntConst int32_t i32_const_; // used if loc_ == kIntConst
}; };
uint32_t spill_offset_; int spill_offset_;
}; };
ASSERT_TRIVIALLY_COPYABLE(VarState); ASSERT_TRIVIALLY_COPYABLE(VarState);
...@@ -282,15 +282,15 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -282,15 +282,15 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister PopToRegister(LiftoffRegList pinned = {}); LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
uint32_t NextSpillOffset(ValueType type) { int NextSpillOffset(ValueType type) {
uint32_t offset = TopSpillOffset() + SlotSizeForType(type); int offset = TopSpillOffset() + SlotSizeForType(type);
if (NeedsAlignment(type)) { if (NeedsAlignment(type)) {
offset = RoundUp(offset, SlotSizeForType(type)); offset = RoundUp(offset, SlotSizeForType(type));
} }
return offset; return offset;
} }
uint32_t TopSpillOffset() { int TopSpillOffset() {
if (cache_state_.stack_state.empty()) { if (cache_state_.stack_state.empty()) {
return 0; return 0;
} }
...@@ -370,7 +370,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -370,7 +370,7 @@ class LiftoffAssembler : public TurboAssembler {
// Call this method whenever spilling something, such that the number of used // Call this method whenever spilling something, such that the number of used
// spill slot can be tracked and the stack frame will be allocated big enough. // spill slot can be tracked and the stack frame will be allocated big enough.
void RecordUsedSpillOffset(uint32_t offset) { void RecordUsedSpillOffset(int offset) {
if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset; if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset;
} }
...@@ -415,10 +415,10 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -415,10 +415,10 @@ class LiftoffAssembler : public TurboAssembler {
// which can later be patched (via {PatchPrepareStackFrame)} when the size of // which can later be patched (via {PatchPrepareStackFrame)} when the size of
// the frame is known. // the frame is known.
inline int PrepareStackFrame(); inline int PrepareStackFrame();
inline void PatchPrepareStackFrame(int offset, uint32_t spill_size); inline void PatchPrepareStackFrame(int offset, int spill_size);
inline void FinishCode(); inline void FinishCode();
inline void AbortCompilation(); inline void AbortCompilation();
inline static uint32_t SlotSizeForType(ValueType type); inline static int SlotSizeForType(ValueType type);
inline static bool NeedsAlignment(ValueType type); inline static bool NeedsAlignment(ValueType type);
inline void LoadConstant(LiftoffRegister, WasmValue, inline void LoadConstant(LiftoffRegister, WasmValue,
...@@ -446,13 +446,13 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -446,13 +446,13 @@ class LiftoffAssembler : public TurboAssembler {
inline void Move(Register dst, Register src, ValueType); inline void Move(Register dst, Register src, ValueType);
inline void Move(DoubleRegister dst, DoubleRegister src, ValueType); inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
inline void Spill(uint32_t offset, LiftoffRegister, ValueType); inline void Spill(int offset, LiftoffRegister, ValueType);
inline void Spill(uint32_t offset, WasmValue); inline void Spill(int offset, WasmValue);
inline void Fill(LiftoffRegister, uint32_t offset, ValueType); inline void Fill(LiftoffRegister, int offset, ValueType);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e. // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value. // 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, uint32_t offset, RegPairHalf); inline void FillI64Half(Register, int offset, RegPairHalf);
inline void FillStackSlotsWithZero(uint32_t start, uint32_t size); inline void FillStackSlotsWithZero(int start, int size);
// i32 binops. // i32 binops.
inline void emit_i32_add(Register dst, Register lhs, Register rhs); inline void emit_i32_add(Register dst, Register lhs, Register rhs);
...@@ -690,12 +690,12 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -690,12 +690,12 @@ class LiftoffAssembler : public TurboAssembler {
uint32_t num_locals() const { return num_locals_; } uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals); void set_num_locals(uint32_t num_locals);
uint32_t GetTotalFrameSlotCount() const { int GetTotalFrameSlotCount() const {
// TODO(zhin): Temporary for migration from index to offset. // TODO(zhin): Temporary for migration from index to offset.
return ((num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize); return ((num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize);
} }
uint32_t GetTotalFrameSlotSize() const { return num_used_spill_bytes_; } int GetTotalFrameSlotSize() const { return num_used_spill_bytes_; }
ValueType local_type(uint32_t index) { ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index); DCHECK_GT(num_locals_, index);
...@@ -735,7 +735,7 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -735,7 +735,7 @@ class LiftoffAssembler : public TurboAssembler {
static_assert(sizeof(ValueType) == 1, static_assert(sizeof(ValueType) == 1,
"Reconsider this inlining if ValueType gets bigger"); "Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_; CacheState cache_state_;
uint32_t num_used_spill_bytes_ = 0; int num_used_spill_bytes_ = 0;
LiftoffBailoutReason bailout_reason_ = kSuccess; LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr; const char* bailout_detail_ = nullptr;
......
...@@ -511,7 +511,7 @@ class LiftoffCompiler { ...@@ -511,7 +511,7 @@ class LiftoffCompiler {
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) { for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
input_idx += ProcessParameter(__ local_type(param_idx), input_idx); input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
} }
uint32_t params_size = __ TopSpillOffset(); int params_size = __ TopSpillOffset();
DCHECK_EQ(input_idx, descriptor_->InputCount()); DCHECK_EQ(input_idx, descriptor_->InputCount());
// Initialize locals beyond parameters. // Initialize locals beyond parameters.
...@@ -521,7 +521,7 @@ class LiftoffCompiler { ...@@ -521,7 +521,7 @@ class LiftoffCompiler {
ValueType type = decoder->GetLocalType(param_idx); ValueType type = decoder->GetLocalType(param_idx);
__ PushStack(type); __ PushStack(type);
} }
uint32_t spill_size = __ TopSpillOffset(); int spill_size = __ TopSpillOffset();
__ FillStackSlotsWithZero(params_size, spill_size); __ FillStackSlotsWithZero(params_size, spill_size);
} else { } else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals(); for (uint32_t param_idx = num_params; param_idx < __ num_locals();
......
...@@ -45,17 +45,17 @@ constexpr int32_t kHighWordOffset = 4; ...@@ -45,17 +45,17 @@ constexpr int32_t kHighWordOffset = 4;
// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack // fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
// slot is located at fp-8-offset. // slot is located at fp-8-offset.
constexpr int32_t kConstantStackSpace = 8; constexpr int kConstantStackSpace = 8;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) {
return kConstantStackSpace + offset; return kConstantStackSpace + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(int offset) {
return MemOperand(fp, -GetStackSlotOffset(offset)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kConstantStackSpace - offset + half_offset); return MemOperand(fp, -kConstantStackSpace - offset + half_offset);
...@@ -282,9 +282,8 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -282,9 +282,8 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
uint32_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer. // assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256; constexpr int kAvailableSpace = 256;
...@@ -301,7 +300,7 @@ void LiftoffAssembler::FinishCode() {} ...@@ -301,7 +300,7 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {} void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type) { switch (type) {
case kWasmS128: case kWasmS128:
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
...@@ -556,8 +555,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -556,8 +555,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src); TurboAssembler::Move(dst, src);
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
...@@ -579,7 +577,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ...@@ -579,7 +577,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
...@@ -608,8 +606,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -608,8 +606,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset); MemOperand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
...@@ -630,12 +627,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ...@@ -630,12 +627,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
} }
} }
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset, void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
RegPairHalf half) {
lw(reg, liftoff::GetHalfStackSlot(offset, half)); lw(reg, liftoff::GetHalfStackSlot(offset, half));
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4); DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
...@@ -643,7 +639,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { ...@@ -643,7 +639,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
if (size <= 48) { if (size <= 48) {
// Special straight-line code for up to 12 words. Generates one // Special straight-line code for up to 12 words. Generates one
// instruction per word (<=12 instructions total). // instruction per word (<=12 instructions total).
for (uint32_t offset = 4; offset <= size; offset += 4) { for (int offset = 4; offset <= size; offset += 4) {
Sw(zero_reg, liftoff::GetStackSlot(start + offset)); Sw(zero_reg, liftoff::GetStackSlot(start + offset));
} }
} else { } else {
......
...@@ -41,13 +41,13 @@ namespace liftoff { ...@@ -41,13 +41,13 @@ namespace liftoff {
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack // fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-16-offset. // slot is located at fp-16-offset.
constexpr int32_t kConstantStackSpace = 16; constexpr int kConstantStackSpace = 16;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) {
return kConstantStackSpace + offset; return kConstantStackSpace + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(int offset) {
return MemOperand(fp, -GetStackSlotOffset(offset)); return MemOperand(fp, -GetStackSlotOffset(offset));
} }
...@@ -240,9 +240,8 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -240,9 +240,8 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
uint64_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer. // assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256; constexpr int kAvailableSpace = 256;
...@@ -259,7 +258,7 @@ void LiftoffAssembler::FinishCode() {} ...@@ -259,7 +258,7 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {} void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type) { switch (type) {
case kWasmS128: case kWasmS128:
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
...@@ -473,8 +472,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -473,8 +472,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
TurboAssembler::Move(dst, src); TurboAssembler::Move(dst, src);
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
...@@ -495,7 +493,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ...@@ -495,7 +493,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset); MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
...@@ -518,8 +516,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -518,8 +516,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset); MemOperand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
...@@ -539,11 +536,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ...@@ -539,11 +536,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
} }
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
......
...@@ -39,11 +39,9 @@ namespace liftoff { ...@@ -39,11 +39,9 @@ namespace liftoff {
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
return kInstanceOffset + offset;
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kInstanceOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
...@@ -56,7 +54,7 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -56,7 +54,7 @@ int LiftoffAssembler::PrepareStackFrame() {
return 0; return 0;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame"); bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
} }
...@@ -64,7 +62,7 @@ void LiftoffAssembler::FinishCode() { EmitConstantPool(); } ...@@ -64,7 +62,7 @@ void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
void LiftoffAssembler::AbortCompilation() { FinishCode(); } void LiftoffAssembler::AbortCompilation() { FinishCode(); }
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type) { switch (type) {
case kWasmS128: case kWasmS128:
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
...@@ -147,25 +145,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -147,25 +145,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister"); bailout(kUnsupportedArchitecture, "Move DoubleRegister");
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register"); bailout(kUnsupportedArchitecture, "Spill register");
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value"); bailout(kUnsupportedArchitecture, "Spill value");
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill"); bailout(kUnsupportedArchitecture, "Fill");
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half"); bailout(kUnsupportedArchitecture, "FillI64Half");
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4); DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
...@@ -178,7 +174,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { ...@@ -178,7 +174,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
if (size <= 36) { if (size <= 36) {
// Special straight-line code for up to nine words. Generates one // Special straight-line code for up to nine words. Generates one
// instruction per word. // instruction per word.
for (uint32_t offset = 4; offset <= size; offset += 4) { for (int offset = 4; offset <= size; offset += 4) {
StoreP(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord)); StoreP(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
} }
} else { } else {
......
...@@ -38,11 +38,9 @@ namespace liftoff { ...@@ -38,11 +38,9 @@ namespace liftoff {
// //
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
return kInstanceOffset + offset;
}
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kInstanceOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
...@@ -55,7 +53,7 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -55,7 +53,7 @@ int LiftoffAssembler::PrepareStackFrame() {
return 0; return 0;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame"); bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
} }
...@@ -63,7 +61,7 @@ void LiftoffAssembler::FinishCode() {} ...@@ -63,7 +61,7 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {} void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
switch (type) { switch (type) {
case kWasmS128: case kWasmS128:
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
...@@ -146,25 +144,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -146,25 +144,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
bailout(kUnsupportedArchitecture, "Move DoubleRegister"); bailout(kUnsupportedArchitecture, "Move DoubleRegister");
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
bailout(kUnsupportedArchitecture, "Spill register"); bailout(kUnsupportedArchitecture, "Spill register");
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value"); bailout(kUnsupportedArchitecture, "Spill value");
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
bailout(kUnsupportedArchitecture, "Fill"); bailout(kUnsupportedArchitecture, "Fill");
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
bailout(kUnsupportedArchitecture, "FillI64Half"); bailout(kUnsupportedArchitecture, "FillI64Half");
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
......
...@@ -35,9 +35,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs & ...@@ -35,9 +35,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack // rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
// slot is located at rbp-16-offset. // slot is located at rbp-16-offset.
constexpr int32_t kConstantStackSpace = 16; constexpr int kConstantStackSpace = 16;
inline Operand GetStackSlot(uint32_t offset) { inline Operand GetStackSlot(int offset) {
return Operand(rbp, -kConstantStackSpace - offset); return Operand(rbp, -kConstantStackSpace - offset);
} }
...@@ -136,9 +136,8 @@ int LiftoffAssembler::PrepareStackFrame() { ...@@ -136,9 +136,8 @@ int LiftoffAssembler::PrepareStackFrame() {
return offset; return offset;
} }
void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) { void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
uint32_t bytes = liftoff::kConstantStackSpace + spill_size; int bytes = liftoff::kConstantStackSpace + spill_size;
DCHECK_LE(bytes, kMaxInt);
// Need to align sp to system pointer size. // Need to align sp to system pointer size.
bytes = RoundUp(bytes, kSystemPointerSize); bytes = RoundUp(bytes, kSystemPointerSize);
// We can't run out of space, just pass anything big enough to not cause the // We can't run out of space, just pass anything big enough to not cause the
...@@ -177,7 +176,7 @@ void LiftoffAssembler::FinishCode() {} ...@@ -177,7 +176,7 @@ void LiftoffAssembler::FinishCode() {}
void LiftoffAssembler::AbortCompilation() {} void LiftoffAssembler::AbortCompilation() {}
uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) { int LiftoffAssembler::SlotSizeForType(ValueType type) {
return ValueTypes::ElementSizeInBytes(type); return ValueTypes::ElementSizeInBytes(type);
} }
...@@ -382,8 +381,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ...@@ -382,8 +381,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
ValueType type) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset); Operand dst = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
...@@ -404,7 +402,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ...@@ -404,7 +402,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
} }
} }
void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset); RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset); Operand dst = liftoff::GetStackSlot(offset);
switch (value.type()) { switch (value.type()) {
...@@ -431,8 +429,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ...@@ -431,8 +429,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
} }
} }
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
ValueType type) {
Operand src = liftoff::GetStackSlot(offset); Operand src = liftoff::GetStackSlot(offset);
switch (type) { switch (type) {
case kWasmI32: case kWasmI32:
...@@ -452,11 +449,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ...@@ -452,11 +449,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
} }
} }
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
UNREACHABLE(); UNREACHABLE();
} }
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) { void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size); DCHECK_LT(0, size);
RecordUsedSpillOffset(start + size); RecordUsedSpillOffset(start + size);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment