Commit 8e2cfc89 authored by Clemens Backes, committed by Commit Bot

[Liftoff] Use {int} for stack offsets

To follow the style guide, and potentially improve performance, we
switch to {int} for all stack offsets. The size of the stack cannot grow
anywhere near {kMaxInt} anyway, and we have limits in place (for number
of locals, number of parameters, and function body size) which also
prevent this number from ever growing near that limit.
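
As a standalone illustration (not part of this commit), the main hazard with
unsigned offsets is that fp-relative displacements are computed by
subtraction, and subtraction on {uint32_t} silently wraps instead of going
negative. All names below are invented for the example:

  // unsigned_wrap.cc -- minimal sketch, not V8 code.
  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t offset = 8;  // a spill offset, as it was typed before this CL
    // A displacement like {-kInstanceOffset - offset} stays unsigned when
    // {offset} is uint32_t, so the negative result wraps around:
    std::printf("unsigned: %u\n", 0u - offset);  // prints 4294967288
    // With plain int, the same arithmetic yields an ordinary negative value:
    std::printf("signed:   %d\n", 0 - static_cast<int>(offset));  // prints -8
    return 0;
  }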

R=jkummerow@chromium.org

Change-Id: I05ca9124d2def79edd212464739bc12315f7b813
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1997445
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65740}
parent e6f14760
@@ -37,22 +37,20 @@ namespace liftoff {
 //
 static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
               "Slot size should be twice the size of the 32 bit pointer.");
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
-constexpr int32_t kConstantStackSpace = kSystemPointerSize;
+constexpr int kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int kConstantStackSpace = kSystemPointerSize;
 // kPatchInstructionsRequired sets a maximum limit of how many instructions that
 // PatchPrepareStackFrame will use in order to increase the stack appropriately.
 // Three instructions are required to sub a large constant, movw + movt + sub.
 constexpr int32_t kPatchInstructionsRequired = 3;
 
-inline int GetStackSlotOffset(uint32_t offset) {
-  return kInstanceOffset + offset;
-}
+inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
 
-inline MemOperand GetStackSlot(uint32_t offset) {
+inline MemOperand GetStackSlot(int offset) {
   return MemOperand(fp, -GetStackSlotOffset(offset));
 }
 
-inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
+inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
   return MemOperand(fp, -kInstanceOffset - offset + half_offset);
@@ -242,9 +240,9 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   // Allocate space for instance plus what is needed for the frame slots.
-  uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
+  int bytes = liftoff::kConstantStackSpace + spill_size;
 #ifdef USE_SIMULATOR
   // When using the simulator, deal with Liftoff which allocates the stack
   // before checking it.
@@ -288,7 +286,7 @@ void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
 
 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
       return ValueTypes::ElementSizeInBytes(type);
@@ -585,8 +583,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
@@ -608,7 +605,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   UseScratchRegisterScope temps(this);
@@ -640,8 +637,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   switch (type) {
     case kWasmI32:
       ldr(reg.gp(), liftoff::GetStackSlot(offset));
@@ -661,12 +657,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
   }
 }
 
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
-                                   RegPairHalf half) {
+void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
   ldr(reg, liftoff::GetHalfStackSlot(offset, half));
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   DCHECK_EQ(0, size % 4);
   RecordUsedSpillOffset(start + size);
@@ -679,7 +674,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
   if (size <= 36) {
     // Special straight-line code for up to 9 words. Generates one
     // instruction per word.
-    for (uint32_t offset = 4; offset <= size; offset += 4) {
+    for (int offset = 4; offset <= size; offset += 4) {
       str(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
     }
   } else {
......
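
The {FillStackSlotsWithZero} hunks (here and in the other architecture files
below) all follow the same shape: small regions get fully unrolled stores,
larger regions get an emitted loop. An illustrative emit-time sketch of that
decision, with hypothetical Emit* helpers standing in for real assembler
calls (the real code emits instructions such as str/mov):

  // zero_fill_sketch.cc -- illustrative only, not V8 code.
  #include <cstdio>

  void EmitStoreZeroWord(int fp_offset) {  // hypothetical helper
    std::printf("  store 0 -> [fp - %d]\n", fp_offset);
  }
  void EmitZeroLoop(int start, int size) {  // hypothetical helper
    std::printf("  loop: zero %d bytes below [fp - %d]\n", size, start);
  }

  void EmitFillStackSlotsWithZero(int start, int size) {
    constexpr int kWordSize = 4;
    constexpr int kUnrollLimit = 36;  // 9 words, the arm threshold above
    if (size <= kUnrollLimit) {
      // Straight-line: one store instruction per word, no loop overhead.
      for (int offset = kWordSize; offset <= size; offset += kWordSize) {
        EmitStoreZeroWord(start + offset);
      }
    } else {
      // Emit a compact runtime loop to keep generated code size bounded.
      EmitZeroLoop(start, size);
    }
  }

  int main() {
    EmitFillStackSlotsWithZero(8, 16);   // four straight-line stores
    EmitFillStackSlotsWithZero(8, 400);  // one emitted loop
    return 0;
  }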
@@ -39,14 +39,12 @@ namespace liftoff {
 //  -----+--------------------+  <-- stack ptr (sp)
 //
 
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
-constexpr int32_t kConstantStackSpace = 0;
+constexpr int kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int kConstantStackSpace = 0;
 
-inline int GetStackSlotOffset(uint32_t offset) {
-  return kInstanceOffset + offset;
-}
+inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
 
-inline MemOperand GetStackSlot(uint32_t offset) {
+inline MemOperand GetStackSlot(int offset) {
   return MemOperand(fp, -GetStackSlotOffset(offset));
 }
 
@@ -120,10 +118,10 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   static_assert(kStackSlotSize == kXRegSize,
                 "kStackSlotSize must equal kXRegSize");
-  uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
+  int bytes = liftoff::kConstantStackSpace + spill_size;
   // The stack pointer is required to be quadword aligned.
   // Misalignment will cause a stack alignment fault.
   bytes = RoundUp(bytes, kQuadWordSizeInBytes);
@@ -176,7 +174,7 @@ void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
 
 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   // TODO(zhin): Unaligned access typically take additional cycles, we should do
   // some performance testing to see how big an effect it will take.
   switch (type) {
@@ -376,14 +374,13 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   Str(liftoff::GetRegFromType(reg, type), dst);
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   UseScratchRegisterScope temps(this);
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
ValueType type) {
void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
MemOperand src = liftoff::GetStackSlot(offset);
Ldr(liftoff::GetRegFromType(reg, type), src);
}
void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size);
......
@@ -22,13 +22,13 @@ namespace liftoff {
 // ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack
 // slot is located at ebp-8-offset.
-constexpr int32_t kConstantStackSpace = 8;
+constexpr int kConstantStackSpace = 8;
 
-inline Operand GetStackSlot(uint32_t offset) {
+inline Operand GetStackSlot(int offset) {
   return Operand(ebp, -kConstantStackSpace - offset);
 }
 
-inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
+inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
   return Operand(ebp, -kConstantStackSpace - offset + half_offset);
 }
@@ -153,9 +153,8 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
-  uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
-  DCHECK_LE(bytes, kMaxInt);
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+  int bytes = liftoff::kConstantStackSpace + spill_size;
   DCHECK_EQ(bytes % kSystemPointerSize, 0);
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
@@ -194,7 +193,7 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   return ValueTypes::ElementSizeInBytes(type);
 }
 
@@ -443,8 +442,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (type) {
@@ -466,7 +464,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
@@ -486,8 +484,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   Operand src = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
RegPairHalf half) {
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
mov(reg, liftoff::GetHalfStackSlot(offset, half));
}
void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size);
DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size);
@@ -521,7 +517,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
   if (size <= 12) {
     // Special straight-line code for up to three words (6-9 bytes per word:
     // C7 <1-4 bytes operand> <4 bytes imm>, makes 18-27 bytes total).
-    for (uint32_t offset = 4; offset <= size; offset += 4) {
+    for (int offset = 4; offset <= size; offset += 4) {
      mov(liftoff::GetHalfStackSlot(start + offset, kLowWord), Immediate(0));
     }
   } else {
......
@@ -124,7 +124,7 @@ class StackTransferRecipe {
 
   void LoadI64HalfIntoRegister(LiftoffRegister dst,
                                const LiftoffAssembler::VarState& src,
-                               uint32_t offset, RegPairHalf half) {
+                               int offset, RegPairHalf half) {
     // Use CHECK such that the remaining code is statically dead if
     // {kNeedI64RegPair} is false.
     CHECK(kNeedI64RegPair);
@@ -209,8 +209,7 @@ class StackTransferRecipe {
     }
   }
 
-  void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t offset,
-                            RegPairHalf half) {
+  void LoadI64HalfStackSlot(LiftoffRegister dst, int offset, RegPairHalf half) {
     if (load_dst_regs_.has(dst)) {
      // It can happen that we spilled the same register to different stack
      // slots, and then we reload them later into the same dst register.
......
@@ -33,7 +33,7 @@ namespace wasm {
 class LiftoffAssembler : public TurboAssembler {
  public:
   // Each slot in our stack frame currently has exactly 8 bytes.
-  static constexpr uint32_t kStackSlotSize = 8;
+  static constexpr int kStackSlotSize = 8;
 
   static constexpr ValueType kWasmIntPtr =
       kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
@@ -42,13 +42,13 @@ class LiftoffAssembler : public TurboAssembler {
    public:
     enum Location : uint8_t { kStack, kRegister, kIntConst };
 
-    explicit VarState(ValueType type, uint32_t offset)
+    explicit VarState(ValueType type, int offset)
         : loc_(kStack), type_(type), spill_offset_(offset) {}
-    explicit VarState(ValueType type, LiftoffRegister r, uint32_t offset)
+    explicit VarState(ValueType type, LiftoffRegister r, int offset)
         : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
       DCHECK_EQ(r.reg_class(), reg_class_for(type));
     }
-    explicit VarState(ValueType type, int32_t i32_const, uint32_t offset)
+    explicit VarState(ValueType type, int32_t i32_const, int offset)
         : loc_(kIntConst),
           type_(type),
          i32_const_(i32_const),
@@ -91,7 +91,7 @@ class LiftoffAssembler : public TurboAssembler {
                  : WasmValue(int64_t{i32_const_});
     }
 
-    uint32_t offset() const { return spill_offset_; }
+    int offset() const { return spill_offset_; }
 
     Register gp_reg() const { return reg().gp(); }
     DoubleRegister fp_reg() const { return reg().fp(); }
@@ -125,7 +125,7 @@ class LiftoffAssembler : public TurboAssembler {
      LiftoffRegister reg_;  // used if loc_ == kRegister
      int32_t i32_const_;    // used if loc_ == kIntConst
     };
-    uint32_t spill_offset_;
+    int spill_offset_;
   };
 
   ASSERT_TRIVIALLY_COPYABLE(VarState);
@@ -282,15 +282,15 @@ class LiftoffAssembler : public TurboAssembler {
 
   LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
 
-  uint32_t NextSpillOffset(ValueType type) {
-    uint32_t offset = TopSpillOffset() + SlotSizeForType(type);
+  int NextSpillOffset(ValueType type) {
+    int offset = TopSpillOffset() + SlotSizeForType(type);
     if (NeedsAlignment(type)) {
       offset = RoundUp(offset, SlotSizeForType(type));
     }
     return offset;
   }
 
-  uint32_t TopSpillOffset() {
+  int TopSpillOffset() {
     if (cache_state_.stack_state.empty()) {
       return 0;
     }
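
A worked example of the {NextSpillOffset} alignment logic just above
(standalone sketch; {RoundUp} is reimplemented locally so the snippet is
self-contained, and the parameter names are invented for illustration):

  // next_spill_offset.cc -- mirrors the logic in the hunk above.
  #include <cassert>

  int RoundUp(int x, int multiple) {
    return (x + multiple - 1) / multiple * multiple;
  }

  int NextSpillOffset(int top_spill_offset, int slot_size,
                      bool needs_alignment) {
    int offset = top_spill_offset + slot_size;
    if (needs_alignment) offset = RoundUp(offset, slot_size);
    return offset;
  }

  int main() {
    // Pushing an 8-byte slot at stack height 12: the unaligned end offset
    // would be 20, so alignment rounds it up to 24.
    assert(NextSpillOffset(12, 8, true) == 24);
    assert(NextSpillOffset(12, 8, false) == 20);
    return 0;
  }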
@@ -370,7 +370,7 @@ class LiftoffAssembler : public TurboAssembler {
 
   // Call this method whenever spilling something, such that the number of used
   // spill slot can be tracked and the stack frame will be allocated big enough.
-  void RecordUsedSpillOffset(uint32_t offset) {
+  void RecordUsedSpillOffset(int offset) {
     if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset;
   }
 
@@ -415,10 +415,10 @@ class LiftoffAssembler : public TurboAssembler {
   // which can later be patched (via {PatchPrepareStackFrame)} when the size of
   // the frame is known.
   inline int PrepareStackFrame();
-  inline void PatchPrepareStackFrame(int offset, uint32_t spill_size);
+  inline void PatchPrepareStackFrame(int offset, int spill_size);
   inline void FinishCode();
   inline void AbortCompilation();
 
-  inline static uint32_t SlotSizeForType(ValueType type);
+  inline static int SlotSizeForType(ValueType type);
   inline static bool NeedsAlignment(ValueType type);
   inline void LoadConstant(LiftoffRegister, WasmValue,
@@ -446,13 +446,13 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Move(Register dst, Register src, ValueType);
   inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
 
-  inline void Spill(uint32_t offset, LiftoffRegister, ValueType);
-  inline void Spill(uint32_t offset, WasmValue);
-  inline void Fill(LiftoffRegister, uint32_t offset, ValueType);
+  inline void Spill(int offset, LiftoffRegister, ValueType);
+  inline void Spill(int offset, WasmValue);
+  inline void Fill(LiftoffRegister, int offset, ValueType);
   // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
   // 4 bytes on the stack holding half of a 64-bit value.
-  inline void FillI64Half(Register, uint32_t offset, RegPairHalf);
-  inline void FillStackSlotsWithZero(uint32_t start, uint32_t size);
+  inline void FillI64Half(Register, int offset, RegPairHalf);
+  inline void FillStackSlotsWithZero(int start, int size);
 
   // i32 binops.
   inline void emit_i32_add(Register dst, Register lhs, Register rhs);
@@ -690,12 +690,12 @@ class LiftoffAssembler : public TurboAssembler {
   uint32_t num_locals() const { return num_locals_; }
   void set_num_locals(uint32_t num_locals);
 
-  uint32_t GetTotalFrameSlotCount() const {
+  int GetTotalFrameSlotCount() const {
     // TODO(zhin): Temporary for migration from index to offset.
     return ((num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize);
   }
 
-  uint32_t GetTotalFrameSlotSize() const { return num_used_spill_bytes_; }
+  int GetTotalFrameSlotSize() const { return num_used_spill_bytes_; }
 
   ValueType local_type(uint32_t index) {
     DCHECK_GT(num_locals_, index);
@@ -735,7 +735,7 @@ class LiftoffAssembler : public TurboAssembler {
   static_assert(sizeof(ValueType) == 1,
                 "Reconsider this inlining if ValueType gets bigger");
   CacheState cache_state_;
-  uint32_t num_used_spill_bytes_ = 0;
+  int num_used_spill_bytes_ = 0;
   LiftoffBailoutReason bailout_reason_ = kSuccess;
   const char* bailout_detail_ = nullptr;
 
......
@@ -511,7 +511,7 @@ class LiftoffCompiler {
     for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
       input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
     }
-    uint32_t params_size = __ TopSpillOffset();
+    int params_size = __ TopSpillOffset();
     DCHECK_EQ(input_idx, descriptor_->InputCount());
 
     // Initialize locals beyond parameters.
@@ -521,7 +521,7 @@ class LiftoffCompiler {
         ValueType type = decoder->GetLocalType(param_idx);
         __ PushStack(type);
       }
-      uint32_t spill_size = __ TopSpillOffset();
+      int spill_size = __ TopSpillOffset();
       __ FillStackSlotsWithZero(params_size, spill_size);
     } else {
       for (uint32_t param_idx = num_params; param_idx < __ num_locals();
......
@@ -45,17 +45,17 @@ constexpr int32_t kHighWordOffset = 4;
 
 // fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
 // slot is located at fp-8-offset.
-constexpr int32_t kConstantStackSpace = 8;
+constexpr int kConstantStackSpace = 8;
 
-inline int GetStackSlotOffset(uint32_t offset) {
+inline int GetStackSlotOffset(int offset) {
   return kConstantStackSpace + offset;
 }
 
-inline MemOperand GetStackSlot(uint32_t offset) {
+inline MemOperand GetStackSlot(int offset) {
   return MemOperand(fp, -GetStackSlotOffset(offset));
 }
 
-inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
+inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
   return MemOperand(fp, -kConstantStackSpace - offset + half_offset);
@@ -282,9 +282,8 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
-  uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
-  DCHECK_LE(bytes, kMaxInt);
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+  int bytes = liftoff::kConstantStackSpace + spill_size;
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 256;
@@ -301,7 +300,7 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
       return ValueTypes::ElementSizeInBytes(type);
@@ -556,8 +555,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   TurboAssembler::Move(dst, src);
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
@@ -579,7 +577,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
@@ -608,8 +606,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   MemOperand src = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -630,12 +627,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
   }
 }
 
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t offset,
-                                   RegPairHalf half) {
+void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
   lw(reg, liftoff::GetHalfStackSlot(offset, half));
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   DCHECK_EQ(0, size % 4);
   RecordUsedSpillOffset(start + size);
@@ -643,7 +639,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
   if (size <= 48) {
     // Special straight-line code for up to 12 words. Generates one
     // instruction per word (<=12 instructions total).
-    for (uint32_t offset = 4; offset <= size; offset += 4) {
+    for (int offset = 4; offset <= size; offset += 4) {
       Sw(zero_reg, liftoff::GetStackSlot(start + offset));
     }
   } else {
......
@@ -41,13 +41,13 @@ namespace liftoff {
 
 // fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
 // slot is located at fp-16-offset.
-constexpr int32_t kConstantStackSpace = 16;
+constexpr int kConstantStackSpace = 16;
 
-inline int GetStackSlotOffset(uint32_t offset) {
+inline int GetStackSlotOffset(int offset) {
   return kConstantStackSpace + offset;
 }
 
-inline MemOperand GetStackSlot(uint32_t offset) {
+inline MemOperand GetStackSlot(int offset) {
   return MemOperand(fp, -GetStackSlotOffset(offset));
 }
 
@@ -240,9 +240,8 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
-  uint64_t bytes = liftoff::kConstantStackSpace + spill_size;
-  DCHECK_LE(bytes, kMaxInt);
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+  int bytes = liftoff::kConstantStackSpace + spill_size;
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 256;
@@ -259,7 +258,7 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
       return ValueTypes::ElementSizeInBytes(type);
@@ -473,8 +472,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   TurboAssembler::Move(dst, src);
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (type) {
@@ -495,7 +493,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   MemOperand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
@@ -518,8 +516,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   MemOperand src = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -539,11 +536,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
   }
 }
 
-void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
   UNREACHABLE();
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   RecordUsedSpillOffset(start + size);
 
......
@@ -39,11 +39,9 @@ namespace liftoff {
 
 constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
 
-inline int GetStackSlotOffset(uint32_t offset) {
-  return kInstanceOffset + offset;
-}
+inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
 
-inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
+inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
   return MemOperand(fp, -kInstanceOffset - offset + half_offset);
@@ -56,7 +54,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return 0;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
 }
 
@@ -64,7 +62,7 @@ void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
 
 void LiftoffAssembler::AbortCompilation() { FinishCode(); }
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
       return ValueTypes::ElementSizeInBytes(type);
@@ -147,25 +145,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   bailout(kUnsupportedArchitecture, "Move DoubleRegister");
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   bailout(kUnsupportedArchitecture, "Spill register");
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   bailout(kUnsupportedArchitecture, "Spill value");
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   bailout(kUnsupportedArchitecture, "Fill");
 }
 
-void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
   bailout(kUnsupportedArchitecture, "FillI64Half");
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   DCHECK_EQ(0, size % 4);
   RecordUsedSpillOffset(start + size);
@@ -178,7 +174,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
   if (size <= 36) {
     // Special straight-line code for up to nine words. Generates one
     // instruction per word.
-    for (uint32_t offset = 4; offset <= size; offset += 4) {
+    for (int offset = 4; offset <= size; offset += 4) {
       StoreP(r0, liftoff::GetHalfStackSlot(start + offset, kLowWord));
     }
   } else {
......
@@ -38,11 +38,9 @@ namespace liftoff {
 //
 constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
 
-inline int GetStackSlotOffset(uint32_t offset) {
-  return kInstanceOffset + offset;
-}
+inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
 
-inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
+inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
   return MemOperand(fp, -kInstanceOffset - offset + half_offset);
@@ -55,7 +53,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return 0;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
 }
 
@@ -63,7 +61,7 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
       return ValueTypes::ElementSizeInBytes(type);
@@ -146,25 +144,23 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   bailout(kUnsupportedArchitecture, "Move DoubleRegister");
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   bailout(kUnsupportedArchitecture, "Spill register");
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   bailout(kUnsupportedArchitecture, "Spill value");
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   bailout(kUnsupportedArchitecture, "Fill");
 }
 
-void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
   bailout(kUnsupportedArchitecture, "FillI64Half");
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   RecordUsedSpillOffset(start + size);
 
......
@@ -35,9 +35,9 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
 
 // rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
 // slot is located at rbp-16-offset.
-constexpr int32_t kConstantStackSpace = 16;
+constexpr int kConstantStackSpace = 16;
 
-inline Operand GetStackSlot(uint32_t offset) {
+inline Operand GetStackSlot(int offset) {
   return Operand(rbp, -kConstantStackSpace - offset);
 }
 
@@ -136,9 +136,8 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, uint32_t spill_size) {
-  uint32_t bytes = liftoff::kConstantStackSpace + spill_size;
-  DCHECK_LE(bytes, kMaxInt);
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+  int bytes = liftoff::kConstantStackSpace + spill_size;
   // Need to align sp to system pointer size.
   bytes = RoundUp(bytes, kSystemPointerSize);
   // We can't run out of space, just pass anything big enough to not cause the
@@ -177,7 +176,7 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
-uint32_t LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
   return ValueTypes::ElementSizeInBytes(type);
 }
 
@@ -382,8 +381,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
-                             ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
   RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (type) {
@@ -404,7 +402,7 @@ void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg,
   }
 }
 
-void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
   RecordUsedSpillOffset(offset);
   Operand dst = liftoff::GetStackSlot(offset);
   switch (value.type()) {
@@ -431,8 +429,7 @@ void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) {
   }
 }
 
-void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
-                            ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
   Operand src = liftoff::GetStackSlot(offset);
   switch (type) {
     case kWasmI32:
@@ -452,11 +449,11 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset,
   }
 }
 
-void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) {
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
   UNREACHABLE();
 }
 
-void LiftoffAssembler::FillStackSlotsWithZero(uint32_t start, uint32_t size) {
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_LT(0, size);
   RecordUsedSpillOffset(start + size);
 
......
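
To tie the addressing hunks together: every platform computes a slot's
address as a negative displacement from the frame pointer. A self-contained
sketch using the ia32 constants from above (illustrative only; helper names
are invented, the real code returns assembler Operands):

  // slot_addressing.cc -- mirrors GetStackSlot/GetHalfStackSlot on ia32.
  #include <cassert>

  constexpr int kConstantStackSpace = 8;  // stack marker + instance slot
  constexpr int kStackSlotSize = 8;

  int SlotDisplacement(int offset) { return -kConstantStackSpace - offset; }

  int HalfSlotDisplacement(int offset, bool low_word) {
    int half_offset = low_word ? 0 : kStackSlotSize / 2;
    return -kConstantStackSpace - offset + half_offset;
  }

  int main() {
    // A slot recorded at offset 16 is addressed as ebp-24; the low word of
    // an i64 in that slot sits at ebp-24 and the high word at ebp-20.
    assert(SlotDisplacement(16) == -24);
    assert(HalfSlotDisplacement(16, /*low_word=*/true) == -24);
    assert(HalfSlotDisplacement(16, /*low_word=*/false) == -20);
    return 0;
  }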