Commit aaf5c124 authored by Ng Zhi An, committed by Commit Bot

[liftoff] Convert callers to use offset

This patch changes many callers of GetStackOffsetFromIndex to directly
use the offset that is stored in the VarState (and other structures).

The tricky part here is that in all archs, GetStackSlotOffset no longer
relies on kFirstStackSlotOffset, because the offset stored in VarState
is relative to the constant space (instance offset), and not offset of
the first stack slot.

For example, for slot 0, the offset was also 0, because it was relative
to the first stack slot offset (which in x64 is fp-24). With this
change, the offset of slot 0 is now 8, but since GetStackSlotOffset is
relative to fp-16, it ends up being fp-24 still.

Because of this change, callers of GetStackOffsetFromIndex need to add
1 to whatever index they were passing. Instead of doing that, we change
GetStackOffsetFromIndex to add 1 inside the body.

After this change, the only callers of GetStackOffsetFromIndex will be
inside of FillStackSlotsWithZero, because they still rely on index to
keep track of how many params were processed, and also how many locals
there are in order to zero those slots, and this is relied on by
RecordUsedSpillSlot to allocate sufficient stack space.

Bug: v8:9909
Change-Id: I52aa4572950565a39e9395192706a9934ac296d4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1925524
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65109}
parent cfab6505
...@@ -38,8 +38,6 @@ namespace liftoff { ...@@ -38,8 +38,6 @@ namespace liftoff {
static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize, static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
"Slot size should be twice the size of the 32 bit pointer."); "Slot size should be twice the size of the 32 bit pointer.");
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
constexpr int32_t kConstantStackSpace = kSystemPointerSize; constexpr int32_t kConstantStackSpace = kSystemPointerSize;
// kPatchInstructionsRequired sets a maximum limit of how many instructions that // kPatchInstructionsRequired sets a maximum limit of how many instructions that
// PatchPrepareStackFrame will use in order to increase the stack appropriately. // PatchPrepareStackFrame will use in order to increase the stack appropriately.
...@@ -47,7 +45,7 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize; ...@@ -47,7 +45,7 @@ constexpr int32_t kConstantStackSpace = kSystemPointerSize;
constexpr int32_t kPatchInstructionsRequired = 3; constexpr int32_t kPatchInstructionsRequired = 3;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kInstanceOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(uint32_t offset) {
...@@ -57,7 +55,7 @@ inline MemOperand GetStackSlot(uint32_t offset) { ...@@ -57,7 +55,7 @@ inline MemOperand GetStackSlot(uint32_t offset) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
} }
inline MemOperand GetInstanceOperand() { inline MemOperand GetInstanceOperand() {
......
...@@ -40,11 +40,10 @@ namespace liftoff { ...@@ -40,11 +40,10 @@ namespace liftoff {
// //
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kSystemPointerSize;
constexpr int32_t kConstantStackSpace = 0; constexpr int32_t kConstantStackSpace = 0;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kInstanceOffset + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(uint32_t offset) {
......
...@@ -21,19 +21,17 @@ namespace wasm { ...@@ -21,19 +21,17 @@ namespace wasm {
namespace liftoff { namespace liftoff {
// ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack // ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack
// slot is located at ebp-16. // slot is located at ebp-8-offset.
constexpr int32_t kConstantStackSpace = 8; constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) { inline Operand GetStackSlot(uint32_t offset) {
return Operand(ebp, -kFirstStackSlotOffset - offset); return Operand(ebp, -kConstantStackSpace - offset);
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return Operand(ebp, -kFirstStackSlotOffset - offset + half_offset); return Operand(ebp, -kConstantStackSpace - offset + half_offset);
} }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
......
...@@ -91,23 +91,18 @@ class StackTransferRecipe { ...@@ -91,23 +91,18 @@ class StackTransferRecipe {
switch (src.loc()) { switch (src.loc()) {
case VarState::kStack: case VarState::kStack:
if (src_index == dst_index) break; if (src_index == dst_index) break;
asm_->MoveStackValue(asm_->GetStackOffsetFromIndex(dst_index), asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
asm_->GetStackOffsetFromIndex(src_index),
src.type());
break; break;
case VarState::kRegister: case VarState::kRegister:
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index), src.reg(), asm_->Spill(dst.offset(), src.reg(), src.type());
src.type());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
asm_->Spill(asm_->GetStackOffsetFromIndex(dst_index), asm_->Spill(dst.offset(), src.constant());
src.constant());
break; break;
} }
break; break;
case VarState::kRegister: case VarState::kRegister:
LoadIntoRegister(dst.reg(), src, LoadIntoRegister(dst.reg(), src, src.offset());
asm_->GetStackOffsetFromIndex(src_index));
break; break;
case VarState::kIntConst: case VarState::kIntConst:
DCHECK_EQ(dst, src); DCHECK_EQ(dst, src);
...@@ -293,18 +288,19 @@ class StackTransferRecipe { ...@@ -293,18 +288,19 @@ class StackTransferRecipe {
// All remaining moves are parts of a cycle. Just spill the first one, then // All remaining moves are parts of a cycle. Just spill the first one, then
// process all remaining moves in that cycle. Repeat for all cycles. // process all remaining moves in that cycle. Repeat for all cycles.
uint32_t next_spill_slot = asm_->cache_state()->stack_height(); uint32_t last_spill_offset =
(asm_->cache_state()->stack_state.empty()
? 0
: asm_->cache_state()->stack_state.back().offset());
while (!move_dst_regs_.is_empty()) { while (!move_dst_regs_.is_empty()) {
// TODO(clemensb): Use an unused register if available. // TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet(); LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst); RegisterMove* move = register_move(dst);
last_spill_offset += LiftoffAssembler::SlotSizeForType(move->type);
LiftoffRegister spill_reg = move->src; LiftoffRegister spill_reg = move->src;
asm_->Spill(LiftoffAssembler::GetStackOffsetFromIndex(next_spill_slot), asm_->Spill(last_spill_offset, spill_reg, move->type);
spill_reg, move->type);
// Remember to reload into the destination register later. // Remember to reload into the destination register later.
LoadStackSlot(dst, asm_->GetStackOffsetFromIndex(next_spill_slot), LoadStackSlot(dst, last_spill_offset, move->type);
move->type);
++next_spill_slot;
ClearExecutedMove(dst); ClearExecutedMove(dst);
} }
} }
...@@ -504,8 +500,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) { ...@@ -504,8 +500,7 @@ LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
case VarState::kStack: { case VarState::kStack: {
LiftoffRegister reg = LiftoffRegister reg =
GetUnusedRegister(reg_class_for(slot.type()), pinned); GetUnusedRegister(reg_class_for(slot.type()), pinned);
Fill(reg, GetStackOffsetFromIndex(cache_state_.stack_height()), Fill(reg, slot.offset(), slot.type());
slot.type());
return reg; return reg;
} }
case VarState::kRegister: case VarState::kRegister:
...@@ -562,11 +557,11 @@ void LiftoffAssembler::Spill(uint32_t index) { ...@@ -562,11 +557,11 @@ void LiftoffAssembler::Spill(uint32_t index) {
case VarState::kStack: case VarState::kStack:
return; return;
case VarState::kRegister: case VarState::kRegister:
Spill(GetStackOffsetFromIndex(index), slot.reg(), slot.type()); Spill(slot.offset(), slot.reg(), slot.type());
cache_state_.dec_used(slot.reg()); cache_state_.dec_used(slot.reg());
break; break;
case VarState::kIntConst: case VarState::kIntConst:
Spill(GetStackOffsetFromIndex(index), slot.constant()); Spill(slot.offset(), slot.constant());
break; break;
} }
slot.MakeStack(); slot.MakeStack();
...@@ -582,7 +577,7 @@ void LiftoffAssembler::SpillAllRegisters() { ...@@ -582,7 +577,7 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) { for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i]; auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
Spill(GetStackOffsetFromIndex(i), slot.reg(), slot.type()); Spill(slot.offset(), slot.reg(), slot.type());
slot.MakeStack(); slot.MakeStack();
} }
cache_state_.reset_used_registers(); cache_state_.reset_used_registers();
...@@ -602,7 +597,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -602,7 +597,7 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
idx < end; ++idx) { idx < end; ++idx) {
VarState& slot = cache_state_.stack_state[idx]; VarState& slot = cache_state_.stack_state[idx];
if (!slot.is_reg()) continue; if (!slot.is_reg()) continue;
Spill(GetStackOffsetFromIndex(idx), slot.reg(), slot.type()); Spill(slot.offset(), slot.reg(), slot.type());
slot.MakeStack(); slot.MakeStack();
} }
...@@ -636,8 +631,8 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig, ...@@ -636,8 +631,8 @@ void LiftoffAssembler::PrepareCall(FunctionSig* sig,
const bool is_pair = kNeedI64RegPair && type == kWasmI64; const bool is_pair = kNeedI64RegPair && type == kWasmI64;
const int num_lowered_params = is_pair ? 2 : 1; const int num_lowered_params = is_pair ? 2 : 1;
const uint32_t stack_idx = param_base + param; const uint32_t stack_idx = param_base + param;
const uint32_t stack_offset = GetStackOffsetFromIndex(stack_idx);
const VarState& slot = cache_state_.stack_state[stack_idx]; const VarState& slot = cache_state_.stack_state[stack_idx];
const uint32_t stack_offset = slot.offset();
// Process both halfs of a register pair separately, because they are passed // Process both halfs of a register pair separately, because they are passed
// as separate parameters. One or both of them could end up on the stack. // as separate parameters. One or both of them could end up on the stack.
for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) { for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
...@@ -775,9 +770,8 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) { ...@@ -775,9 +770,8 @@ void LiftoffAssembler::MoveToReturnRegisters(FunctionSig* sig) {
: reg_class_for(return_type) == kGpReg : reg_class_for(return_type) == kGpReg
? LiftoffRegister(kGpReturnRegisters[0]) ? LiftoffRegister(kGpReturnRegisters[0])
: LiftoffRegister(kFpReturnRegisters[0]); : LiftoffRegister(kFpReturnRegisters[0]);
stack_transfers.LoadIntoRegister( stack_transfers.LoadIntoRegister(return_reg, cache_state_.stack_state.back(),
return_reg, cache_state_.stack_state.back(), cache_state_.stack_state.back().offset());
GetStackOffsetFromIndex(cache_state_.stack_height() - 1));
} }
#ifdef ENABLE_SLOW_DCHECKS #ifdef ENABLE_SLOW_DCHECKS
...@@ -831,7 +825,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) { ...@@ -831,7 +825,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.dec_used(slot->reg().low()); cache_state_.dec_used(slot->reg().low());
cache_state_.dec_used(slot->reg().high()); cache_state_.dec_used(slot->reg().high());
} }
Spill(GetStackOffsetFromIndex(idx), slot->reg(), slot->type()); Spill(slot->offset(), slot->reg(), slot->type());
slot->MakeStack(); slot->MakeStack();
if (--remaining_uses == 0) break; if (--remaining_uses == 0) break;
} }
......
...@@ -45,7 +45,11 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -45,7 +45,11 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(zhin): Temporary for migration from index to offset. // TODO(zhin): Temporary for migration from index to offset.
inline static uint32_t GetStackOffsetFromIndex(uint32_t index) { inline static uint32_t GetStackOffsetFromIndex(uint32_t index) {
return index * LiftoffAssembler::kStackSlotSize; // The idea of a stack offset changed from being relative to first stack
// offset (instance offset + kStackSlotSize), to being relative to instance
// offset. So the stack offset of a particular index needs to take into
// account the size of the first slot.
return (index + 1) * LiftoffAssembler::kStackSlotSize;
} }
class VarState { class VarState {
......
...@@ -1292,7 +1292,7 @@ class LiftoffCompiler { ...@@ -1292,7 +1292,7 @@ class LiftoffCompiler {
case kStack: { case kStack: {
auto rc = reg_class_for(imm.type); auto rc = reg_class_for(imm.type);
LiftoffRegister reg = __ GetUnusedRegister(rc); LiftoffRegister reg = __ GetUnusedRegister(rc);
__ Fill(reg, __ GetStackOffsetFromIndex(imm.index), imm.type); __ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg); __ PushRegister(slot.type(), reg);
break; break;
} }
...@@ -1302,12 +1302,12 @@ class LiftoffCompiler { ...@@ -1302,12 +1302,12 @@ class LiftoffCompiler {
void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot, void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) { uint32_t local_index) {
auto& state = *__ cache_state(); auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back();
ValueType type = dst_slot->type(); ValueType type = dst_slot->type();
if (dst_slot->is_reg()) { if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg(); LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) { if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), __ Fill(dst_slot->reg(), src_slot.offset(), type);
__ GetStackOffsetFromIndex(state.stack_height() - 1), type);
return; return;
} }
state.dec_used(slot_reg); state.dec_used(slot_reg);
...@@ -1316,9 +1316,7 @@ class LiftoffCompiler { ...@@ -1316,9 +1316,7 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ local_type(local_index)); DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type); RegClass rc = reg_class_for(type);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc); LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
__ Fill(dst_reg, __ Fill(dst_reg, src_slot.offset(), type);
__ GetStackOffsetFromIndex(__ cache_state()->stack_height() - 1),
type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset()); *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg); __ cache_state()->inc_used(dst_reg);
} }
......
...@@ -44,13 +44,11 @@ constexpr int32_t kHighWordOffset = 4; ...@@ -44,13 +44,11 @@ constexpr int32_t kHighWordOffset = 4;
#endif #endif
// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack // fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
// slot is located at fp-16. // slot is located at fp-8-offset.
constexpr int32_t kConstantStackSpace = 8; constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kConstantStackSpace + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(uint32_t offset) {
...@@ -60,7 +58,7 @@ inline MemOperand GetStackSlot(uint32_t offset) { ...@@ -60,7 +58,7 @@ inline MemOperand GetStackSlot(uint32_t offset) {
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset); return MemOperand(fp, -kConstantStackSpace - offset + half_offset);
} }
inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); } inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
......
...@@ -40,13 +40,11 @@ namespace liftoff { ...@@ -40,13 +40,11 @@ namespace liftoff {
// //
// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack // fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
// slot is located at fp-24. // slot is located at fp-16-offset.
constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kConstantStackSpace + offset;
} }
inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetStackSlot(uint32_t offset) {
......
...@@ -38,17 +38,15 @@ namespace liftoff { ...@@ -38,17 +38,15 @@ namespace liftoff {
// //
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kInstanceOffset + offset;
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
} }
} // namespace liftoff } // namespace liftoff
......
...@@ -37,17 +37,15 @@ namespace liftoff { ...@@ -37,17 +37,15 @@ namespace liftoff {
// -----+--------------------+ <-- stack ptr (sp) // -----+--------------------+ <-- stack ptr (sp)
// //
constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize; constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
constexpr int32_t kFirstStackSlotOffset =
kInstanceOffset + 2 * kSystemPointerSize;
inline int GetStackSlotOffset(uint32_t offset) { inline int GetStackSlotOffset(uint32_t offset) {
return kFirstStackSlotOffset + offset; return kInstanceOffset + offset;
} }
inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) { inline MemOperand GetHalfStackSlot(uint32_t offset, RegPairHalf half) {
int32_t half_offset = int32_t half_offset =
half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
return MemOperand(fp, -kFirstStackSlotOffset - offset + half_offset); return MemOperand(fp, -kInstanceOffset - offset + half_offset);
} }
} // namespace liftoff } // namespace liftoff
......
...@@ -34,13 +34,11 @@ static_assert((kLiftoffAssemblerFpCacheRegs & ...@@ -34,13 +34,11 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
"scratch registers must not be used as cache registers"); "scratch registers must not be used as cache registers");
// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack // rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
// slot is located at rbp-24. // slot is located at rbp-16-offset.
constexpr int32_t kConstantStackSpace = 16; constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t offset) { inline Operand GetStackSlot(uint32_t offset) {
return Operand(rbp, -kFirstStackSlotOffset - offset); return Operand(rbp, -kConstantStackSpace - offset);
} }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr. // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment