Commit 366e5e24 authored by Bill Budge, committed by Commit Bot

[compiler] Adjust slot calculations for return slots.

- Uses linkage location information, to keep in sync with how
  LinkageAllocator and Frame work to assign stack slots.

Bug: v8:9198

Change-Id: I299038e4cff706355263f00603ba32515449fefe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2556259
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Bill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71532}
parent 5c9cd96b
......@@ -1673,7 +1673,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
ArmOperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -1686,10 +1685,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Simd128()) {
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kArmPeek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -2054,7 +2054,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
Arm64OperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -2069,10 +2068,11 @@ void InstructionSelector::EmitPrepareResults(
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kArm64Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -1323,7 +1323,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
IA32OperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -1336,10 +1335,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Simd128()) {
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kIA32Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -883,13 +883,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->output_nodes.push_back(result);
} else {
buffer->output_nodes.resize(ret_count);
int stack_count = 0;
for (size_t i = 0; i < ret_count; ++i) {
LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
buffer->output_nodes[i] = PushParameter(nullptr, location);
if (location.IsCallerFrameSlot()) {
stack_count += location.GetSizeInPointers();
}
}
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
......@@ -901,7 +897,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
DCHECK(!buffer->output_nodes[index].node);
buffer->output_nodes[index].node = node;
}
frame_->EnsureReturnSlots(stack_count);
frame_->EnsureReturnSlots(
static_cast<int>(buffer->descriptor->StackReturnCount()));
}
// Filter out the outputs that aren't live because no projection uses them.
......
......@@ -1373,7 +1373,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
MipsOperandGenerator g(this);
int reverse_slot = 0;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -1384,10 +1383,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kMipsPeek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -1745,7 +1745,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
Mips64OperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -1758,10 +1757,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Simd128()) {
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kMips64Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -2463,7 +2463,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
PPCOperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -2476,10 +2475,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Simd128()) {
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kPPC_Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -2891,7 +2891,6 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
S390OperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -2904,10 +2903,11 @@ void InstructionSelector::EmitPrepareResults(
} else if (output.location.GetType() == MachineType::Simd128()) {
MarkAsSimd128(output.node);
}
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
Emit(kS390_Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -1826,8 +1826,6 @@ void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
Node* node) {
X64OperandGenerator g(this);
int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
......@@ -1841,10 +1839,11 @@ void InstructionSelector::EmitPrepareResults(
MarkAsSimd128(output.node);
}
InstructionOperand result = g.DefineAsRegister(output.node);
int offset = call_descriptor->GetOffsetToReturns();
int reverse_slot = -output.location.GetLocation() - offset;
InstructionOperand slot = g.UseImmediate(reverse_slot);
Emit(kX64Peek, 1, &result, 1, &slot);
}
reverse_slot += output.location.GetSizeInPointers();
}
}
......
......@@ -119,6 +119,12 @@ int CallDescriptor::GetStackParameterDelta(
return stack_param_delta;
}
// Returns the number of stack slots between the stack pointer and the first
// return value slot: all stack-parameter slots, plus one padding slot when
// the argument area requires alignment padding.
int CallDescriptor::GetOffsetToReturns() const {
const int param_slots = static_cast<int>(StackParameterCount());
// An odd parameter-slot count may need a padding slot to keep alignment.
return ShouldPadArguments(param_slots) ? param_slots + 1 : param_slots;
}
int CallDescriptor::GetTaggedParameterSlots() const {
int result = 0;
for (size_t i = 0; i < InputCount(); ++i) {
......
......@@ -307,10 +307,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of C parameters to this call.
size_t ParameterCount() const { return location_sig_->parameter_count(); }
// The number of stack parameters to the call.
// The number of stack parameter slots to the call.
size_t StackParameterCount() const { return stack_param_count_; }
// The number of stack return values from the call.
// The number of stack return value slots from the call.
size_t StackReturnCount() const { return stack_return_count_; }
// The number of parameters to the JS function call.
......@@ -394,6 +394,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
// Returns the number of slots to the first return value slot.
int GetOffsetToReturns() const;
int GetTaggedParameterSlots() const;
bool CanTailCall(const CallDescriptor* callee) const;
......
......@@ -884,8 +884,6 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
void LiftoffAssembler::FinishCall(const FunctionSig* sig,
compiler::CallDescriptor* call_descriptor) {
// Offset of the current return value relative to the stack pointer.
int return_offset = 0;
int call_desc_return_idx = 0;
for (ValueType return_type : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
......@@ -907,10 +905,11 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
} else {
DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
LoadReturnStackSlot(reg_pair[pair_idx], return_offset, lowered_type);
const int type_size = lowered_type.element_size_bytes();
const int slot_size = RoundUp<kSystemPointerSize>(type_size);
return_offset += slot_size;
// Get slot offset relative to the stack pointer.
int offset = call_descriptor->GetOffsetToReturns();
int return_slot = -loc.GetLocation() - offset - 1;
LoadReturnStackSlot(reg_pair[pair_idx],
return_slot * kSystemPointerSize, lowered_type);
}
if (pair_idx == 0) {
pinned.set(reg_pair[0]);
......@@ -923,7 +922,8 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
reg_pair[1].gp()));
}
}
RecordUsedSpillOffset(TopSpillOffset() + return_offset);
int return_slots = static_cast<int>(call_descriptor->StackReturnCount());
RecordUsedSpillOffset(TopSpillOffset() + return_slots * kSystemPointerSize);
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment