Commit 7ddc3f66 authored by Clemens Backes, committed by Commit Bot

[Liftoff] Include static frame offset in spill offsets

This saves the addition when accessing the stack slot, and (more
importantly) will make it easier to access the stack slot for debugging,
since there is no platform-specific constant to be added any more.
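
Roughly, the change replaces a per-platform add at every slot access with offsets that already include the static frame part. A minimal before/after sketch (OldGetStackSlot/NewGetStackSlot are illustrative names, not V8 identifiers; the real per-architecture definitions follow in the diff):

    // Before: spill offsets started at 0, so each access re-added a
    // platform-specific constant such as kInstanceOffset.
    inline MemOperand OldGetStackSlot(int offset) {
      return MemOperand(fp, -(kInstanceOffset + offset));
    }

    // After: offsets are handed out starting at the static frame size,
    // so a slot lives directly at fp - offset on every platform.
    inline MemOperand NewGetStackSlot(int offset) {
      return MemOperand(fp, -offset);
    }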

R=zhin@chromium.org

Bug: v8:10019
Change-Id: I3eaf1838b78c2b7b343a435d7c8a32e7e71508ed
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1998082
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65749}
parent f28cc379
@@ -38,22 +38,17 @@ namespace liftoff {
 static_assert(2 * kSystemPointerSize == LiftoffAssembler::kStackSlotSize,
               "Slot size should be twice the size of the 32 bit pointer.");
 constexpr int kInstanceOffset = 2 * kSystemPointerSize;
-constexpr int kConstantStackSpace = kSystemPointerSize;
 
 // kPatchInstructionsRequired sets a maximum limit of how many instructions that
 // PatchPrepareStackFrame will use in order to increase the stack appropriately.
 // Three instructions are required to sub a large constant, movw + movt + sub.
 constexpr int32_t kPatchInstructionsRequired = 3;
 
-inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
-
-inline MemOperand GetStackSlot(int offset) {
-  return MemOperand(fp, -GetStackSlotOffset(offset));
-}
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
 
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return MemOperand(fp, -kInstanceOffset - offset + half_offset);
+  return MemOperand(fp, -offset + half_offset);
 }
 
 inline MemOperand GetInstanceOperand() {
@@ -240,14 +235,12 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
-  // Allocate space for instance plus what is needed for the frame slots.
-  int bytes = liftoff::kConstantStackSpace + spill_size;
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
 #ifdef USE_SIMULATOR
   // When using the simulator, deal with Liftoff which allocates the stack
   // before checking it.
   // TODO(arm): Remove this when the stack check mechanism will be updated.
-  if (bytes > KB / 2) {
+  if (frame_size > KB / 2) {
     bailout(kOtherReason,
             "Stack limited to 512 bytes to avoid a bug in StackCheck");
     return;
@@ -257,7 +250,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
                                        buffer_start_ + offset,
                                        liftoff::kPatchInstructionsRequired);
 #if V8_OS_WIN
-  if (bytes > kStackPageSize) {
+  if (frame_size > kStackPageSize) {
     // Generate OOL code (at the end of the function, where the current
     // assembler is pointing) to do the explicit stack limit check (see
     // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
@@ -278,7 +271,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
     return;
   }
 #endif
-  patching_assembler.sub(sp, sp, Operand(bytes));
+  patching_assembler.sub(sp, sp, Operand(frame_size));
   patching_assembler.PadWithNops();
 }
@@ -286,6 +279,11 @@ void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
 
 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kInstanceOffset;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
@@ -682,8 +680,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   // Use r1 for start address (inclusive), r2 for end address (exclusive).
   push(r1);
   push(r2);
-  sub(r1, fp, Operand(liftoff::GetStackSlotOffset(start + size)));
-  sub(r2, fp, Operand(liftoff::GetStackSlotOffset(start)));
+  sub(r1, fp, Operand(start + size));
+  sub(r2, fp, Operand(start));
   Label loop;
   bind(&loop);
...
@@ -40,13 +40,8 @@ namespace liftoff {
 //
 constexpr int kInstanceOffset = 2 * kSystemPointerSize;
-constexpr int kConstantStackSpace = 0;
 
-inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
-
-inline MemOperand GetStackSlot(int offset) {
-  return MemOperand(fp, -GetStackSlotOffset(offset));
-}
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
 
 inline MemOperand GetInstanceOperand() {
   return MemOperand(fp, -kInstanceOffset);
@@ -118,19 +113,18 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   static_assert(kStackSlotSize == kXRegSize,
                 "kStackSlotSize must equal kXRegSize");
-  int bytes = liftoff::kConstantStackSpace + spill_size;
   // The stack pointer is required to be quadword aligned.
   // Misalignment will cause a stack alignment fault.
-  bytes = RoundUp(bytes, kQuadWordSizeInBytes);
-  if (!IsImmAddSub(bytes)) {
+  frame_size = RoundUp(frame_size, kQuadWordSizeInBytes);
+  if (!IsImmAddSub(frame_size)) {
     // Round the stack to a page to try to fit a add/sub immediate.
-    bytes = RoundUp(bytes, 0x1000);
-    if (!IsImmAddSub(bytes)) {
+    frame_size = RoundUp(frame_size, 0x1000);
+    if (!IsImmAddSub(frame_size)) {
       // Stack greater than 4M! Because this is a quite improbable case, we
-      // just fallback to Turbofan.
+      // just fallback to TurboFan.
       bailout(kOtherReason, "Stack too big");
       return;
     }
@@ -139,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   // When using the simulator, deal with Liftoff which allocates the stack
   // before checking it.
   // TODO(arm): Remove this when the stack check mechanism will be updated.
-  if (bytes > KB / 2) {
+  if (frame_size > KB / 2) {
     bailout(kOtherReason,
             "Stack limited to 512 bytes to avoid a bug in StackCheck");
     return;
@@ -148,7 +142,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   PatchingAssembler patching_assembler(AssemblerOptions{},
                                        buffer_start_ + offset, 1);
 #if V8_OS_WIN
-  if (bytes > kStackPageSize) {
+  if (frame_size > kStackPageSize) {
     // Generate OOL code (at the end of the function, where the current
     // assembler is pointing) to do the explicit stack limit check (see
     // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
@@ -159,7 +153,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
     patching_assembler.b(ool_offset >> kInstrSizeLog2);
     // Now generate the OOL code.
-    Claim(bytes, 1);
+    Claim(frame_size, 1);
     // Jump back to the start of the function (from {pc_offset()} to {offset +
     // kInstrSize}).
     int func_start_offset = offset + kInstrSize - pc_offset();
@@ -167,13 +161,18 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
     return;
   }
 #endif
-  patching_assembler.PatchSubSp(bytes);
+  patching_assembler.PatchSubSp(frame_size);
 }
 
 void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
 
 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kInstanceOffset;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   // TODO(zhin): Unaligned access typically take additional cycles, we should do
   // some performance testing to see how big an effect it will take.
@@ -423,7 +422,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   DCHECK_EQ(0, size % 4);
   RecordUsedSpillOffset(start + size);
-  int max_stp_offset = -liftoff::GetStackSlotOffset(start + size);
+  int max_stp_offset = -start - size;
   if (size <= 12 * kStackSlotSize &&
       IsImmLSPair(max_stp_offset, kXRegSizeLog2)) {
     // Special straight-line code for up to 12 slots. Generates one
@@ -456,7 +455,7 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
     UseScratchRegisterScope temps(this);
     Register address_reg = temps.AcquireX();
     // This {Sub} might use another temp register if the offset is too large.
-    Sub(address_reg, fp, liftoff::GetStackSlotOffset(start + size));
+    Sub(address_reg, fp, start + size);
     Register count_reg = temps.AcquireX();
     Mov(count_reg, size / 4);
...
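
A hedged arithmetic illustration of the frame-size normalization in the arm64 hunks above (values chosen for the example):

    // frame_size = 100     -> RoundUp(100, 16) = 112  (sp stays quadword aligned)
    // If that is not encodable as an add/sub immediate, round up to a page:
    // frame_size = 0x12345 -> RoundUp(0x12345, 0x1000) = 0x13000  (encodable)
    // A frame beyond ~4M cannot be encoded at all; Liftoff bails out to TurboFan.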
@@ -24,14 +24,12 @@ namespace liftoff {
 // slot is located at ebp-8-offset.
 constexpr int kConstantStackSpace = 8;
 
-inline Operand GetStackSlot(int offset) {
-  return Operand(ebp, -kConstantStackSpace - offset);
-}
+inline Operand GetStackSlot(int offset) { return Operand(ebp, -offset); }
 
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return Operand(ebp, -kConstantStackSpace - offset + half_offset);
+  return Operand(ebp, -offset + half_offset);
 }
 
 // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
@@ -153,9 +151,8 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
-  int bytes = liftoff::kConstantStackSpace + spill_size;
-  DCHECK_EQ(bytes % kSystemPointerSize, 0);
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
+  DCHECK_EQ(frame_size % kSystemPointerSize, 0);
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 64;
@@ -163,7 +160,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
       AssemblerOptions{},
       ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
 #if V8_OS_WIN
-  if (bytes > kStackPageSize) {
+  if (frame_size > kStackPageSize) {
     // Generate OOL code (at the end of the function, where the current
     // assembler is pointing) to do the explicit stack limit check (see
     // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
@@ -177,7 +174,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
              patching_assembler.pc_offset());
     // Now generate the OOL code.
-    AllocateStackSpace(bytes);
+    AllocateStackSpace(frame_size);
     // Jump back to the start of the function (from {pc_offset()} to {offset +
     // kSubSpSize}).
     int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
@@ -185,7 +182,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
     return;
   }
 #endif
-  patching_assembler.sub_sp_32(bytes);
+  patching_assembler.sub_sp_32(frame_size);
   DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
 }
@@ -193,6 +190,11 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kConstantStackSpace;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   return ValueTypes::ElementSizeInBytes(type);
 }
...
@@ -282,10 +282,7 @@ class StackTransferRecipe {
     // All remaining moves are parts of a cycle. Just spill the first one, then
     // process all remaining moves in that cycle. Repeat for all cycles.
-    uint32_t last_spill_offset =
-        (asm_->cache_state()->stack_state.empty()
-             ? 0
-             : asm_->cache_state()->stack_state.back().offset());
+    int last_spill_offset = asm_->TopSpillOffset();
     while (!move_dst_regs_.is_empty()) {
       // TODO(clemensb): Use an unused register if available.
       LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
...
@@ -290,11 +290,10 @@ class LiftoffAssembler : public TurboAssembler {
     return offset;
   }
 
-  int TopSpillOffset() {
-    if (cache_state_.stack_state.empty()) {
-      return 0;
-    }
-    return cache_state_.stack_state.back().offset();
+  int TopSpillOffset() const {
+    return cache_state_.stack_state.empty()
+               ? StaticStackFrameSize()
+               : cache_state_.stack_state.back().offset();
   }
 
   void PushRegister(ValueType type, LiftoffRegister reg) {
@@ -371,7 +370,7 @@ class LiftoffAssembler : public TurboAssembler {
   // Call this method whenever spilling something, such that the number of used
   // spill slot can be tracked and the stack frame will be allocated big enough.
   void RecordUsedSpillOffset(int offset) {
-    if (offset >= num_used_spill_bytes_) num_used_spill_bytes_ = offset;
+    if (offset >= max_used_spill_offset_) max_used_spill_offset_ = offset;
   }
 
   // Load parameters into the right registers / stack slots for the call.
@@ -415,9 +414,10 @@ class LiftoffAssembler : public TurboAssembler {
   // which can later be patched (via {PatchPrepareStackFrame)} when the size of
   // the frame is known.
   inline int PrepareStackFrame();
-  inline void PatchPrepareStackFrame(int offset, int spill_size);
+  inline void PatchPrepareStackFrame(int offset, int frame_size);
   inline void FinishCode();
   inline void AbortCompilation();
+  inline static constexpr int StaticStackFrameSize();
   inline static int SlotSizeForType(ValueType type);
   inline static bool NeedsAlignment(ValueType type);
@@ -692,10 +692,10 @@ class LiftoffAssembler : public TurboAssembler {
   int GetTotalFrameSlotCount() const {
     // TODO(zhin): Temporary for migration from index to offset.
-    return ((num_used_spill_bytes_ + kStackSlotSize - 1) / kStackSlotSize);
+    return ((max_used_spill_offset_ + kStackSlotSize - 1) / kStackSlotSize);
   }
 
-  int GetTotalFrameSlotSize() const { return num_used_spill_bytes_; }
+  int GetTotalFrameSize() const { return max_used_spill_offset_; }
 
   ValueType local_type(uint32_t index) {
     DCHECK_GT(num_locals_, index);
@@ -735,7 +735,7 @@ class LiftoffAssembler : public TurboAssembler {
   static_assert(sizeof(ValueType) == 1,
                 "Reconsider this inlining if ValueType gets bigger");
   CacheState cache_state_;
-  int num_used_spill_bytes_ = 0;
+  int max_used_spill_offset_ = StaticStackFrameSize();
   LiftoffBailoutReason bailout_reason_ = kSuccess;
   const char* bailout_detail_ = nullptr;
...
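
A short model of the bookkeeping invariant introduced above, as a hedged sketch (SpillTracker is illustrative, not a V8 class; 8-byte slots and a static frame size of 8 are assumed): spill offsets are measured from fp and now start at StaticStackFrameSize() instead of 0, so the largest recorded offset is itself the total frame size.

    // Illustrative only: models max_used_spill_offset_ / TopSpillOffset().
    struct SpillTracker {
      static constexpr int kStaticFrameSize = 8;   // assumed example value
      int max_used_spill_offset = kStaticFrameSize;

      int PushSlot(int slot_size) {
        int offset = max_used_spill_offset + slot_size;
        max_used_spill_offset = offset;  // like RecordUsedSpillOffset(offset)
        return offset;                   // the slot lives at fp - offset
      }
      int GetTotalFrameSize() const { return max_used_spill_offset; }
    };

Because the static part is baked into every offset, PatchPrepareStackFrame can subtract GetTotalFrameSize() from sp directly, with no per-platform constant added on top.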
@@ -521,7 +521,7 @@ class LiftoffCompiler {
         ValueType type = decoder->GetLocalType(param_idx);
         __ PushStack(type);
       }
-      int spill_size = __ TopSpillOffset();
+      int spill_size = __ TopSpillOffset() - params_size;
       __ FillStackSlotsWithZero(params_size, spill_size);
     } else {
       for (uint32_t param_idx = num_params; param_idx < __ num_locals();
@@ -588,7 +588,7 @@ class LiftoffCompiler {
       GenerateOutOfLineCode(&ool);
     }
     __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
-                              __ GetTotalFrameSlotSize());
+                              __ GetTotalFrameSize());
     __ FinishCode();
     safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCount());
     __ MaybeEmitOutOfLineConstantPool();
...
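
The liftoff-compiler.cc change above follows from that convention: TopSpillOffset() no longer starts at 0, so the number of bytes to zero-fill is a difference of two offsets. A worked example with assumed values (8-byte slots, static frame size 8, one parameter, two locals; params_size is taken to be the top spill offset after the parameters were pushed):

    // params_size = TopSpillOffset() after the parameter  ->  8 + 8     = 16
    // TopSpillOffset() after pushing the two locals       -> 16 + 8 + 8 = 32
    // spill_size  = TopSpillOffset() - params_size        =  32 - 16    = 16
    // FillStackSlotsWithZero(16, 16) zeroes [fp - 32, fp - 16).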
@@ -47,18 +47,12 @@ constexpr int32_t kHighWordOffset = 4;
 // slot is located at fp-8-offset.
 constexpr int kConstantStackSpace = 8;
 
-inline int GetStackSlotOffset(int offset) {
-  return kConstantStackSpace + offset;
-}
-
-inline MemOperand GetStackSlot(int offset) {
-  return MemOperand(fp, -GetStackSlotOffset(offset));
-}
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
 
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return MemOperand(fp, -kConstantStackSpace - offset + half_offset);
+  return MemOperand(fp, -offset + half_offset);
 }
 
 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
@@ -282,8 +276,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
-  int bytes = liftoff::kConstantStackSpace + spill_size;
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 256;
@@ -293,13 +286,18 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   // If bytes can be represented as 16bit, addiu will be generated and two
   // nops will stay untouched. Otherwise, lui-ori sequence will load it to
   // register and, as third instruction, addu will be generated.
-  patching_assembler.Addu(sp, sp, Operand(-bytes));
+  patching_assembler.Addu(sp, sp, Operand(-frame_size));
 }
 
 void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kInstanceOffset;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
@@ -646,8 +644,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   // General case for bigger counts (12 instructions).
   // Use a0 for start address (inclusive), a1 for end address (exclusive).
   Push(a1, a0);
-  Addu(a0, fp, Operand(-liftoff::GetStackSlotOffset(start + size)));
-  Addu(a1, fp, Operand(-liftoff::GetStackSlotOffset(start)));
+  Addu(a0, fp, Operand(-start - size));
+  Addu(a1, fp, Operand(-start));
   Label loop;
   bind(&loop);
...
@@ -43,13 +43,7 @@ namespace liftoff {
 // slot is located at fp-16-offset.
 constexpr int kConstantStackSpace = 16;
 
-inline int GetStackSlotOffset(int offset) {
-  return kConstantStackSpace + offset;
-}
-
-inline MemOperand GetStackSlot(int offset) {
-  return MemOperand(fp, -GetStackSlotOffset(offset));
-}
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
 
 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
@@ -240,8 +234,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
-  int bytes = liftoff::kConstantStackSpace + spill_size;
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 256;
@@ -251,13 +244,18 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
   // If bytes can be represented as 16bit, daddiu will be generated and two
   // nops will stay untouched. Otherwise, lui-ori sequence will load it to
   // register and, as third instruction, daddu will be generated.
-  patching_assembler.Daddu(sp, sp, Operand(-bytes));
+  patching_assembler.Daddu(sp, sp, Operand(-frame_size));
 }
 
 void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kConstantStackSpace;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
@@ -559,8 +557,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   // General case for bigger counts (12 instructions).
   // Use a0 for start address (inclusive), a1 for end address (exclusive).
   Push(a1, a0);
-  Daddu(a0, fp, Operand(-liftoff::GetStackSlotOffset(start + size)));
-  Daddu(a1, fp, Operand(-liftoff::GetStackSlotOffset(start)));
+  Daddu(a0, fp, Operand(-start - size));
+  Daddu(a1, fp, Operand(-start));
   Label loop;
   bind(&loop);
...
@@ -39,8 +39,6 @@ namespace liftoff {
 constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
 
-inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
-
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
@@ -54,7 +52,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return 0;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
 }
@@ -62,6 +60,11 @@ void LiftoffAssembler::FinishCode() { EmitConstantPool(); }
 
 void LiftoffAssembler::AbortCompilation() { FinishCode(); }
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kInstanceOffset;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
@@ -182,8 +185,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   // Use r4 for start address (inclusive), r5 for end address (exclusive).
   push(r4);
   push(r5);
-  subi(r4, fp, Operand(liftoff::GetStackSlotOffset(start + size)));
-  subi(r5, fp, Operand(liftoff::GetStackSlotOffset(start)));
+  subi(r4, fp, Operand(start + size));
+  subi(r5, fp, Operand(start));
   Label loop;
   bind(&loop);
...
@@ -38,12 +38,10 @@ namespace liftoff {
 //
 constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
 
-inline int GetStackSlotOffset(int offset) { return kInstanceOffset + offset; }
-
 inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
   int32_t half_offset =
       half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
-  return MemOperand(fp, -kInstanceOffset - offset + half_offset);
+  return MemOperand(fp, -offset + half_offset);
 }
 
 }  // namespace liftoff
@@ -53,7 +51,7 @@ int LiftoffAssembler::PrepareStackFrame() {
   return 0;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   bailout(kUnsupportedArchitecture, "PatchPrepareStackFrame");
 }
@@ -61,6 +59,11 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kInstanceOffset;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   switch (type) {
     case kWasmS128:
@@ -186,8 +189,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   // Use r3 for start address (inclusive), r4 for end address (exclusive).
   push(r3);
   push(r4);
-  SubP(r3, fp, Operand(liftoff::GetStackSlotOffset(start + size)));
-  SubP(r4, fp, Operand(liftoff::GetStackSlotOffset(start)));
+  SubP(r3, fp, Operand(start + size));
+  SubP(r4, fp, Operand(start));
   Label loop;
   bind(&loop);
...
@@ -37,9 +37,7 @@ static_assert((kLiftoffAssemblerFpCacheRegs &
 // slot is located at rbp-16-offset.
 constexpr int kConstantStackSpace = 16;
 
-inline Operand GetStackSlot(int offset) {
-  return Operand(rbp, -kConstantStackSpace - offset);
-}
+inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
 
 // TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
 inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
@@ -136,10 +134,9 @@ int LiftoffAssembler::PrepareStackFrame() {
   return offset;
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
-  int bytes = liftoff::kConstantStackSpace + spill_size;
+void LiftoffAssembler::PatchPrepareStackFrame(int offset, int frame_size) {
   // Need to align sp to system pointer size.
-  bytes = RoundUp(bytes, kSystemPointerSize);
+  frame_size = RoundUp(frame_size, kSystemPointerSize);
   // We can't run out of space, just pass anything big enough to not cause the
   // assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 64;
@@ -147,7 +144,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
       AssemblerOptions{},
       ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
 #if V8_OS_WIN
-  if (bytes > kStackPageSize) {
+  if (frame_size > kStackPageSize) {
     // Generate OOL code (at the end of the function, where the current
     // assembler is pointing) to do the explicit stack limit check (see
     // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-6.0/aa227153(v=vs.60)).
@@ -160,7 +157,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
              patching_assembler.pc_offset());
     // Now generate the OOL code.
-    AllocateStackSpace(bytes);
+    AllocateStackSpace(frame_size);
     // Jump back to the start of the function (from {pc_offset()} to {offset +
     // kSubSpSize}).
     int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
@@ -168,7 +165,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, int spill_size) {
     return;
   }
 #endif
-  patching_assembler.sub_sp_32(bytes);
+  patching_assembler.sub_sp_32(frame_size);
   DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
 }
@@ -176,6 +173,11 @@ void LiftoffAssembler::FinishCode() {}
 
 void LiftoffAssembler::AbortCompilation() {}
 
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+  return liftoff::kConstantStackSpace;
+}
+
 int LiftoffAssembler::SlotSizeForType(ValueType type) {
   return ValueTypes::ElementSizeInBytes(type);
 }
...