Commit 943ccb98 authored by Andreas Haas, committed by Commit Bot

Revert "[turbofan] Implement on-stack returns (Intel)"

This reverts commit 1e49864f.

Reason for revert: Crashing test on the waterfall https://logs.chromium.org/v/?s=chromium%2Fbb%2Fclient.v8%2FV8_Linux_gcc_4.8%2F16871%2F%2B%2Frecipes%2Fsteps%2FCheck%2F0%2Flogs%2FReturnMultipleRandom%2F0

Original change's description:
> [turbofan] Implement on-stack returns (Intel)
> 
> Add the ability to return (multiple) return values on the stack:
> 
> - Extend stack frames with a new buffer region for return slots.
>   This region is located at the end of a caller's frame such that
>   its slots can be indexed as caller frame slots in a callee
>   (located beyond its parameters) and assigned return values.
> - Adjust stack frame constructon and deconstruction accordingly.
> - Extend linkage computation to support register plus stack returns.
> - Reserve return slots in caller frame when respective calls occur.
> - Introduce and generate architecture instructions ('peek') for
>   reading back results from return slots in the caller.
> - Aggressive tests.
> - Some minor clean-up.
> 
> So far, only ia32 and x64 are implemented.
> 
> Change-Id: I9532ad13aa307c1dec40548c5b84600fe2f762ce
> Reviewed-on: https://chromium-review.googlesource.com/766371
> Commit-Queue: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Ben Titzer <titzer@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#49994}

TBR=titzer@chromium.org,rossberg@chromium.org,ahaas@chromium.org

Change-Id: Ib257e92448942f8ef07d5ef246f9381f4784f014
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/819637
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50000}
parent e55f3ce6
...@@ -1596,27 +1596,22 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1596,27 +1596,22 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments. // Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) { for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n]; PushParameter input = (*arguments)[n];
if (input.node) { if (input.node()) {
int slot = static_cast<int>(n); int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(), Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
g.UseRegister(input.node)); g.UseRegister(input.node()));
} }
} }
} else { } else {
// Push any stack arguments. // Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) { for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes. // Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue; if (input.node() == nullptr) continue;
Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node)); Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
} }
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
// TODO(ahaas): Port.
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; } bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
......
...@@ -1805,7 +1805,7 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1805,7 +1805,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke the arguments into the stack. // Poke the arguments into the stack.
ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP; ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) { while (slot >= 0) {
Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node), Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
g.TempImmediate(slot)); g.TempImmediate(slot));
slot--; slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
...@@ -1816,11 +1816,6 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1816,11 +1816,6 @@ void InstructionSelector::EmitPrepareArguments(
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
// TODO(ahaas): Port.
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; } bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
......
...@@ -13,10 +13,8 @@ namespace internal { ...@@ -13,10 +13,8 @@ namespace internal {
namespace compiler { namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots) Frame::Frame(int fixed_frame_size_in_slots)
: fixed_slot_count_(fixed_frame_size_in_slots), : frame_slot_count_(fixed_frame_size_in_slots),
frame_slot_count_(fixed_frame_size_in_slots),
spill_slot_count_(0), spill_slot_count_(0),
return_slot_count_(0),
allocated_registers_(nullptr), allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {} allocated_double_registers_(nullptr) {}
......
...@@ -22,7 +22,7 @@ class CallDescriptor; ...@@ -22,7 +22,7 @@ class CallDescriptor;
// into them. Mutable state associated with the frame is stored separately in // into them. Mutable state associated with the frame is stored separately in
// FrameAccessState. // FrameAccessState.
// //
// Frames are divided up into four regions. // Frames are divided up into three regions.
// - The first is the fixed header, which always has a constant size and can be // - The first is the fixed header, which always has a constant size and can be
// predicted before code generation begins depending on the type of code being // predicted before code generation begins depending on the type of code being
// generated. // generated.
...@@ -33,15 +33,11 @@ class CallDescriptor; ...@@ -33,15 +33,11 @@ class CallDescriptor;
// reserved after register allocation, since its size can only be precisely // reserved after register allocation, since its size can only be precisely
// determined after register allocation once the number of used callee-saved // determined after register allocation once the number of used callee-saved
// register is certain. // register is certain.
// - The fourth region is a scratch area for return values from other functions
// called, if multiple returns cannot all be passed in registers. This region
// Must be last in a stack frame, so that it is positioned immediately below
// the stack frame of a callee to store to.
// //
// The frame region immediately below the fixed header contains spill slots // The frame region immediately below the fixed header contains spill slots
// starting at slot 4 for JSFunctions. The callee-saved frame region below that // starting at slot 4 for JSFunctions. The callee-saved frame region below that
// starts at 4+spill_slot_count_. Callee stack slots correspond to // starts at 4+spill_slot_count_. Callee stack slots corresponding to
// parameters that are accessible through negative slot ids. // parameters are accessible through negative slot ids.
// //
// Every slot of a caller or callee frame is accessible by the register // Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its // allocator and gap resolver with a SpillSlotOperand containing its
...@@ -77,13 +73,7 @@ class CallDescriptor; ...@@ -77,13 +73,7 @@ class CallDescriptor;
// |- - - - - - - - -| | | // |- - - - - - - - -| | |
// | ... | Callee-saved | // | ... | Callee-saved |
// |- - - - - - - - -| | | // |- - - - - - - - -| | |
// m+r+3 | callee-saved r | v | // m+r+3 | callee-saved r | v v
// +-----------------+---- |
// m+r+4 | return 0 | ^ |
// |- - - - - - - - -| | |
// | ... | Return |
// |- - - - - - - - -| | |
// | return q-1 | v v
// -----+-----------------+----- <-- stack ptr ------------- // -----+-----------------+----- <-- stack ptr -------------
// //
class Frame : public ZoneObject { class Frame : public ZoneObject {
...@@ -91,9 +81,8 @@ class Frame : public ZoneObject { ...@@ -91,9 +81,8 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots); explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; } inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
inline int GetFixedSlotCount() const { return fixed_slot_count_; }
inline int GetSpillSlotCount() const { return spill_slot_count_; } inline int GetSpillSlotCount() const { return spill_slot_count_; }
inline int GetReturnSlotCount() const { return return_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) { void SetAllocatedRegisters(BitVector* regs) {
DCHECK_NULL(allocated_registers_); DCHECK_NULL(allocated_registers_);
...@@ -123,25 +112,19 @@ class Frame : public ZoneObject { ...@@ -123,25 +112,19 @@ class Frame : public ZoneObject {
} }
int AllocateSpillSlot(int width, int alignment = 0) { int AllocateSpillSlot(int width, int alignment = 0) {
DCHECK_EQ(frame_slot_count_,
fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
int frame_slot_count_before = frame_slot_count_; int frame_slot_count_before = frame_slot_count_;
if (alignment > kPointerSize) { if (alignment <= kPointerSize) {
// Slots are pointer sized, so alignment greater than a pointer size
// requires allocating additional slots.
width += alignment - kPointerSize;
}
AllocateAlignedFrameSlots(width); AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before; } else {
return frame_slot_count_ - return_slot_count_ - 1; // We need to allocate more place for spill slot
} // in case we need an aligned spill slot to be
// able to properly align start of spill slot
void EnsureReturnSlots(int count) { // and still have enough place to hold all the
if (count > return_slot_count_) { // data
count -= return_slot_count_; AllocateAlignedFrameSlots(width + alignment - kPointerSize);
frame_slot_count_ += count;
return_slot_count_ += count;
} }
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
return frame_slot_count_ - 1;
} }
int AlignFrame(int alignment = kDoubleSize); int AlignFrame(int alignment = kDoubleSize);
...@@ -169,10 +152,8 @@ class Frame : public ZoneObject { ...@@ -169,10 +152,8 @@ class Frame : public ZoneObject {
} }
private: private:
int fixed_slot_count_;
int frame_slot_count_; int frame_slot_count_;
int spill_slot_count_; int spill_slot_count_;
int return_slot_count_;
BitVector* allocated_registers_; BitVector* allocated_registers_;
BitVector* allocated_double_registers_; BitVector* allocated_double_registers_;
......
...@@ -53,7 +53,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep, ...@@ -53,7 +53,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
src_index = src_loc.register_code() * aliases; src_index = src_loc.register_code() * aliases;
} else { } else {
src_index = src_loc.index(); src_index = src_loc.index();
// For operands that occupy multiple slots, the index refers to the last // For operands that occuply multiple slots, the index refers to the last
// slot. On little-endian architectures, we start at the high slot and use a // slot. On little-endian architectures, we start at the high slot and use a
// negative step so that register-to-slot moves are in the correct order. // negative step so that register-to-slot moves are in the correct order.
src_step = -slot_size; src_step = -slot_size;
......
...@@ -2006,7 +2006,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2006,7 +2006,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
case kIA32Poke: { case kIA32Poke: {
int slot = MiscField::decode(instr->opcode()); int const slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) { if (HasImmediateInput(instr, 0)) {
__ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0)); __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
} else { } else {
...@@ -2014,27 +2014,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2014,27 +2014,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
} }
case kIA32PeekFloat32: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ movss(i.OutputFloatRegister(), Operand(ebp, offset));
break;
}
case kIA32PeekFloat64: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ movsd(i.OutputDoubleRegister(), Operand(ebp, offset));
break;
}
case kIA32Peek: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ mov(i.OutputRegister(), Operand(ebp, offset));
break;
}
case kSSEF32x4Splat: { case kSSEF32x4Splat: {
DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
...@@ -3514,9 +3493,8 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3514,9 +3493,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done); __ bind(&done);
} }
// Skip callee-saved and return slots, which are created below. // Skip callee-saved slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves); shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) { if (shrink_slots > 0) {
__ sub(esp, Immediate(shrink_slots * kPointerSize)); __ sub(esp, Immediate(shrink_slots * kPointerSize));
} }
...@@ -3528,11 +3506,6 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3528,11 +3506,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (((1 << i) & saves)) __ push(Register::from_code(i)); if (((1 << i) & saves)) __ push(Register::from_code(i));
} }
} }
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
__ sub(esp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
}
} }
void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
...@@ -3541,10 +3514,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { ...@@ -3541,10 +3514,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const RegList saves = descriptor->CalleeSavedRegisters(); const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers. // Restore registers.
if (saves != 0) { if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
__ add(esp, Immediate(returns * kPointerSize));
}
for (int i = 0; i < Register::kNumRegisters; i++) { for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue; if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i)); __ pop(Register::from_code(i));
......
...@@ -111,9 +111,6 @@ namespace compiler { ...@@ -111,9 +111,6 @@ namespace compiler {
V(IA32PushFloat32) \ V(IA32PushFloat32) \
V(IA32PushFloat64) \ V(IA32PushFloat64) \
V(IA32Poke) \ V(IA32Poke) \
V(IA32Peek) \
V(IA32PeekFloat32) \
V(IA32PeekFloat64) \
V(IA32StackCheck) \ V(IA32StackCheck) \
V(SSEF32x4Splat) \ V(SSEF32x4Splat) \
V(AVXF32x4Splat) \ V(AVXF32x4Splat) \
......
...@@ -263,9 +263,6 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -263,9 +263,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect; return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32StackCheck: case kIA32StackCheck:
case kIA32Peek:
case kIA32PeekFloat32:
case kIA32PeekFloat64:
return kIsLoadOperation; return kIsLoadOperation;
case kIA32Push: case kIA32Push:
......
...@@ -1114,11 +1114,11 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1114,11 +1114,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments. // Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) { for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n]; PushParameter input = (*arguments)[n];
if (input.node) { if (input.node()) {
int const slot = static_cast<int>(n); int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node) InstructionOperand value = g.CanBeImmediate(node)
? g.UseImmediate(input.node) ? g.UseImmediate(input.node())
: g.UseRegister(input.node); : g.UseRegister(input.node());
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value); Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
} }
} }
...@@ -1127,27 +1127,28 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1127,27 +1127,28 @@ void InstructionSelector::EmitPrepareArguments(
int effect_level = GetEffectLevel(node); int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) { for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes. // Skip any alignment holes in pushed nodes.
if (input.node == nullptr) continue; Node* input_node = input.node();
if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) { if (input.node() == nullptr) continue;
if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
InstructionOperand outputs[1]; InstructionOperand outputs[1];
InstructionOperand inputs[4]; InstructionOperand inputs[4];
size_t input_count = 0; size_t input_count = 0;
InstructionCode opcode = kIA32Push; InstructionCode opcode = kIA32Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand( AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count); input_node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode); opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs); Emit(opcode, 0, outputs, input_count, inputs);
} else { } else {
InstructionOperand value = InstructionOperand value =
g.CanBeImmediate(input.node) g.CanBeImmediate(input.node())
? g.UseImmediate(input.node) ? g.UseImmediate(input.node())
: IsSupported(ATOM) || : IsSupported(ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node)) sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node) ? g.UseRegister(input.node())
: g.Use(input.node); : g.Use(input.node());
if (input.location.GetType() == MachineType::Float32()) { if (input.type() == MachineType::Float32()) {
Emit(kIA32PushFloat32, g.NoOutput(), value); Emit(kIA32PushFloat32, g.NoOutput(), value);
} else if (input.location.GetType() == MachineType::Float64()) { } else if (input.type() == MachineType::Float64()) {
Emit(kIA32PushFloat64, g.NoOutput(), value); Emit(kIA32PushFloat64, g.NoOutput(), value);
} else { } else {
Emit(kIA32Push, g.NoOutput(), value); Emit(kIA32Push, g.NoOutput(), value);
...@@ -1157,33 +1158,6 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1157,33 +1158,6 @@ void InstructionSelector::EmitPrepareArguments(
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
IA32OperandGenerator g(this);
int reverse_slot = 0;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
DCHECK(!descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kIA32PeekFloat32 | MiscField::encode(reverse_slot), result);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kIA32PeekFloat64 | MiscField::encode(reverse_slot - 1), result);
} else {
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kIA32Peek | MiscField::encode(reverse_slot), result);
}
}
}
bool InstructionSelector::IsTailCallAddressImmediate() { return true; } bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
......
...@@ -668,7 +668,7 @@ struct CallBuffer { ...@@ -668,7 +668,7 @@ struct CallBuffer {
const CallDescriptor* descriptor; const CallDescriptor* descriptor;
FrameStateDescriptor* frame_state_descriptor; FrameStateDescriptor* frame_state_descriptor;
ZoneVector<PushParameter> output_nodes; NodeVector output_nodes;
InstructionOperandVector outputs; InstructionOperandVector outputs;
InstructionOperandVector instruction_args; InstructionOperandVector instruction_args;
ZoneVector<PushParameter> pushed_nodes; ZoneVector<PushParameter> pushed_nodes;
...@@ -702,28 +702,17 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, ...@@ -702,28 +702,17 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (buffer->descriptor->ReturnCount() > 0) { if (buffer->descriptor->ReturnCount() > 0) {
// Collect the projections that represent multiple outputs from this call. // Collect the projections that represent multiple outputs from this call.
if (buffer->descriptor->ReturnCount() == 1) { if (buffer->descriptor->ReturnCount() == 1) {
PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)}; buffer->output_nodes.push_back(call);
buffer->output_nodes.push_back(result);
} else { } else {
buffer->output_nodes.resize(buffer->descriptor->ReturnCount()); buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
int stack_count = 0;
for (Edge const edge : call->use_edges()) { for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue; if (!NodeProperties::IsValueEdge(edge)) continue;
Node* node = edge.from(); DCHECK_EQ(IrOpcode::kProjection, edge.from()->opcode());
DCHECK_EQ(IrOpcode::kProjection, node->opcode()); size_t const index = ProjectionIndexOf(edge.from()->op());
size_t const index = ProjectionIndexOf(node->op());
DCHECK_LT(index, buffer->output_nodes.size()); DCHECK_LT(index, buffer->output_nodes.size());
DCHECK(!buffer->output_nodes[index].node); DCHECK(!buffer->output_nodes[index]);
PushParameter result = {node, buffer->output_nodes[index] = edge.from();
buffer->descriptor->GetReturnLocation(index)};
buffer->output_nodes[index] = result;
if (result.location.IsCallerFrameSlot()) {
stack_count += result.location.GetSizeInPointers();
}
} }
frame_->EnsureReturnSlots(stack_count);
} }
// Filter out the outputs that aren't live because no projection uses them. // Filter out the outputs that aren't live because no projection uses them.
...@@ -733,22 +722,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, ...@@ -733,22 +722,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: buffer->frame_state_descriptor->state_combine() : buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount(); .ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) { for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
bool output_is_live = buffer->output_nodes[i].node != nullptr || bool output_is_live = buffer->output_nodes[i] != nullptr ||
i < outputs_needed_by_framestate; i < outputs_needed_by_framestate;
if (output_is_live) { if (output_is_live) {
LinkageLocation location = buffer->output_nodes[i].location; MachineRepresentation rep =
MachineRepresentation rep = location.GetType().representation(); buffer->descriptor->GetReturnType(static_cast<int>(i))
.representation();
LinkageLocation location =
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
Node* output = buffer->output_nodes[i].node; Node* output = buffer->output_nodes[i];
InstructionOperand op = output == nullptr InstructionOperand op = output == nullptr
? g.TempLocation(location) ? g.TempLocation(location)
: g.DefineAsLocation(output, location); : g.DefineAsLocation(output, location);
MarkAsRepresentation(rep, op); MarkAsRepresentation(rep, op);
if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
buffer->outputs.push_back(op); buffer->outputs.push_back(op);
buffer->output_nodes[i].node = nullptr;
}
} }
} }
} }
...@@ -853,8 +842,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, ...@@ -853,8 +842,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) { if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1); buffer->pushed_nodes.resize(stack_index + 1);
} }
PushParameter param = {*iter, location}; PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
buffer->pushed_nodes[stack_index] = param; buffer->pushed_nodes[stack_index] = parameter;
pushed_count++; pushed_count++;
} else { } else {
buffer->instruction_args.push_back(op); buffer->instruction_args.push_back(op);
...@@ -2444,8 +2433,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { ...@@ -2444,8 +2433,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
&buffer.instruction_args.front()); &buffer.instruction_args.front());
if (instruction_selection_failed()) return; if (instruction_selection_failed()) return;
call_instr->MarkAsCall(); call_instr->MarkAsCall();
EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
} }
void InstructionSelector::VisitCallWithCallerSavedRegisters( void InstructionSelector::VisitCallWithCallerSavedRegisters(
......
...@@ -10,7 +10,6 @@ ...@@ -10,7 +10,6 @@
#include "src/compiler/common-operator.h" #include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h" #include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h" #include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h" #include "src/compiler/machine-operator.h"
#include "src/compiler/node.h" #include "src/compiler/node.h"
#include "src/globals.h" #include "src/globals.h"
...@@ -31,13 +30,17 @@ class StateObjectDeduplicator; ...@@ -31,13 +30,17 @@ class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the // This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee. // call stack with their parameter index in the call descriptor of the callee.
struct PushParameter { class PushParameter {
PushParameter(Node* n = nullptr, public:
LinkageLocation l = LinkageLocation::ForAnyRegister()) PushParameter() : node_(nullptr), type_(MachineType::None()) {}
: node(n), location(l) {} PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
Node* node() const { return node_; }
MachineType type() const { return type_; }
Node* node; private:
LinkageLocation location; Node* node_;
MachineType type_;
}; };
enum class FrameStateInputKind { kAny, kStackSlot }; enum class FrameStateInputKind { kAny, kStackSlot };
...@@ -350,8 +353,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { ...@@ -350,8 +353,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments, void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node); const CallDescriptor* descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
const CallDescriptor* descriptor, Node* node);
void EmitIdentity(Node* node); void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node); bool CanProduceSignalingNaN(Node* node);
......
...@@ -316,10 +316,9 @@ void Int64Lowering::LowerNode(Node* node) { ...@@ -316,10 +316,9 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kTailCall: { case IrOpcode::kTailCall: {
CallDescriptor* descriptor = CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op())); const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
bool returns_require_lowering = if (DefaultLowering(node) ||
GetReturnCountAfterLowering(descriptor) != (descriptor->ReturnCount() == 1 &&
static_cast<int>(descriptor->ReturnCount()); descriptor->GetReturnType(0) == MachineType::Int64())) {
if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call // Tail calls do not have return values, so adjusting the call
// descriptor is enough. // descriptor is enough.
auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor); auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
......
...@@ -197,14 +197,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final ...@@ -197,14 +197,12 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers, RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags, RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "", const char* debug_name = "",
const RegList allocatable_registers = 0, const RegList allocatable_registers = 0)
size_t stack_return_count = 0)
: kind_(kind), : kind_(kind),
target_type_(target_type), target_type_(target_type),
target_loc_(target_loc), target_loc_(target_loc),
location_sig_(location_sig), location_sig_(location_sig),
stack_param_count_(stack_param_count), stack_param_count_(stack_param_count),
stack_return_count_(stack_return_count),
properties_(properties), properties_(properties),
callee_saved_registers_(callee_saved_registers), callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers), callee_saved_fp_registers_(callee_saved_fp_registers),
...@@ -234,9 +232,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final ...@@ -234,9 +232,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of stack parameters to the call. // The number of stack parameters to the call.
size_t StackParameterCount() const { return stack_param_count_; } size_t StackParameterCount() const { return stack_param_count_; }
// The number of stack return values from the call.
size_t StackReturnCount() const { return stack_return_count_; }
// The number of parameters to the JS function call. // The number of parameters to the JS function call.
size_t JSParameterCount() const { size_t JSParameterCount() const {
DCHECK(IsJSFunctionCall()); DCHECK(IsJSFunctionCall());
...@@ -323,7 +318,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final ...@@ -323,7 +318,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const LinkageLocation target_loc_; const LinkageLocation target_loc_;
const LocationSignature* const location_sig_; const LocationSignature* const location_sig_;
const size_t stack_param_count_; const size_t stack_param_count_;
const size_t stack_return_count_;
const Operator::Properties properties_; const Operator::Properties properties_;
const RegList callee_saved_registers_; const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_; const RegList callee_saved_fp_registers_;
......
...@@ -1181,8 +1181,8 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1181,8 +1181,8 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments. // Poke any stack arguments.
int slot = kCArgSlotCount; int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) { for (PushParameter input : (*arguments)) {
if (input.node) { if (input.node()) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2)); g.TempImmediate(slot << kPointerSizeLog2));
++slot; ++slot;
} }
...@@ -1196,19 +1196,14 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1196,19 +1196,14 @@ void InstructionSelector::EmitPrepareArguments(
} }
for (size_t n = 0; n < arguments->size(); ++n) { for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n]; PushParameter input = (*arguments)[n];
if (input.node) { if (input.node()) {
Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(n << kPointerSizeLog2)); g.TempImmediate(n << kPointerSizeLog2));
} }
} }
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
// TODO(ahaas): Port.
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; } bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
......
...@@ -1676,7 +1676,7 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1676,7 +1676,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments. // Poke any stack arguments.
int slot = kCArgSlotCount; int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) { for (PushParameter input : (*arguments)) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2)); g.TempImmediate(slot << kPointerSizeLog2));
++slot; ++slot;
} }
...@@ -1688,19 +1688,14 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1688,19 +1688,14 @@ void InstructionSelector::EmitPrepareArguments(
} }
for (size_t n = 0; n < arguments->size(); ++n) { for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n]; PushParameter input = (*arguments)[n];
if (input.node) { if (input.node()) {
Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2))); g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
} }
} }
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
// TODO(ahaas): Port.
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; } bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
......
...@@ -134,6 +134,7 @@ void RawMachineAssembler::Return(Node* value) { ...@@ -134,6 +134,7 @@ void RawMachineAssembler::Return(Node* value) {
current_block_ = nullptr; current_block_ = nullptr;
} }
void RawMachineAssembler::Return(Node* v1, Node* v2) { void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {Int32Constant(0), v1, v2}; Node* values[] = {Int32Constant(0), v1, v2};
Node* ret = MakeNode(common()->Return(2), 3, values); Node* ret = MakeNode(common()->Return(2), 3, values);
...@@ -141,6 +142,7 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) { ...@@ -141,6 +142,7 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
current_block_ = nullptr; current_block_ = nullptr;
} }
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) { void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {Int32Constant(0), v1, v2, v3}; Node* values[] = {Int32Constant(0), v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 4, values); Node* ret = MakeNode(common()->Return(3), 4, values);
...@@ -148,24 +150,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) { ...@@ -148,24 +150,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
current_block_ = nullptr; current_block_ = nullptr;
} }
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3, Node* v4) {
Node* values[] = {Int32Constant(0), v1, v2, v3, v4};
Node* ret = MakeNode(common()->Return(4), 5, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
void RawMachineAssembler::Return(int count, Node* vs[]) {
typedef Node* Node_ptr;
Node** values = new Node_ptr[count + 1];
values[0] = Int32Constant(0);
for (int i = 0; i < count; ++i) values[i + 1] = vs[i];
Node* ret = MakeNode(common()->Return(count), count + 1, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
delete[] values;
}
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) { void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
Node* values[] = {pop, value}; Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values); Node* ret = MakeNode(common()->Return(1), 2, values);
...@@ -188,14 +172,6 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, ...@@ -188,14 +172,6 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
current_block_ = nullptr; current_block_ = nullptr;
} }
void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
Node* v4) {
Node* values[] = {pop, v1, v2, v3, v4};
Node* ret = MakeNode(common()->Return(4), 5, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
void RawMachineAssembler::DebugAbort(Node* message) { void RawMachineAssembler::DebugAbort(Node* message) {
AddNode(machine()->DebugAbort(), message); AddNode(machine()->DebugAbort(), message);
} }
......
...@@ -828,12 +828,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { ...@@ -828,12 +828,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Return(Node* value); void Return(Node* value);
void Return(Node* v1, Node* v2); void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3); void Return(Node* v1, Node* v2, Node* v3);
void Return(Node* v1, Node* v2, Node* v3, Node* v4);
void Return(int count, Node* v[]);
void PopAndReturn(Node* pop, Node* value); void PopAndReturn(Node* pop, Node* value);
void PopAndReturn(Node* pop, Node* v1, Node* v2); void PopAndReturn(Node* pop, Node* v1, Node* v2);
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3); void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label); void Bind(RawMachineLabel* label);
void Deoptimize(Node* state); void Deoptimize(Node* state);
void DebugAbort(Node* message); void DebugAbort(Node* message);
......
...@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ia32 =================================================================== // == ia32 ===================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx #define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
#define GP_RETURN_REGISTERS eax, edx #define GP_RETURN_REGISTERS eax, edx, ecx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2 #define FP_RETURN_REGISTERS xmm1, xmm2
...@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == x64 ==================================================================== // == x64 ====================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi #define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
#define GP_RETURN_REGISTERS rax, rdx #define GP_RETURN_REGISTERS rax, rdx, rcx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2 #define FP_RETURN_REGISTERS xmm1, xmm2
...@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm ==================================================================== // == arm ====================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS r3, r0, r1, r2 #define GP_PARAM_REGISTERS r3, r0, r1, r2
#define GP_RETURN_REGISTERS r0, r1 #define GP_RETURN_REGISTERS r0, r1, r3
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7 #define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1 #define FP_RETURN_REGISTERS d0, d1
...@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm64 ==================================================================== // == arm64 ====================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6 #define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
#define GP_RETURN_REGISTERS x0, x1 #define GP_RETURN_REGISTERS x0, x1, x2
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7 #define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1 #define FP_RETURN_REGISTERS d0, d1
...@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips =================================================================== // == mips ===================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3 #define GP_PARAM_REGISTERS a0, a1, a2, a3
#define GP_RETURN_REGISTERS v0, v1 #define GP_RETURN_REGISTERS v0, v1, t7
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14 #define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4 #define FP_RETURN_REGISTERS f2, f4
...@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips64 ================================================================= // == mips64 =================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7 #define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
#define GP_RETURN_REGISTERS v0, v1 #define GP_RETURN_REGISTERS v0, v1, t3
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14 #define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4 #define FP_RETURN_REGISTERS f2, f4
...@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ppc & ppc64 ============================================================ // == ppc & ppc64 ============================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9 #define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
#define GP_RETURN_REGISTERS r3, r4 #define GP_RETURN_REGISTERS r3, r4, r5
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8 #define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2 #define FP_RETURN_REGISTERS d1, d2
...@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ================================================================== // == s390x ==================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5 #define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
#define GP_RETURN_REGISTERS r2, r3 #define GP_RETURN_REGISTERS r2, r3, r4
#define FP_PARAM_REGISTERS d0, d2, d4, d6 #define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6 #define FP_RETURN_REGISTERS d0, d2, d4, d6
...@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) { ...@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390 =================================================================== // == s390 ===================================================================
// =========================================================================== // ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5 #define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
#define GP_RETURN_REGISTERS r2, r3 #define GP_RETURN_REGISTERS r2, r3, r4
#define FP_PARAM_REGISTERS d0, d2 #define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2 #define FP_RETURN_REGISTERS d0, d2
...@@ -158,8 +158,6 @@ struct Allocator { ...@@ -158,8 +158,6 @@ struct Allocator {
int stack_offset; int stack_offset;
void AdjustStackOffset(int offset) { stack_offset += offset; }
LinkageLocation Next(ValueType type) { LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) { if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location. // Allocate a floating point register/stack location.
...@@ -228,28 +226,25 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) { ...@@ -228,28 +226,25 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(), LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1); fsig->parameter_count() + 1);
// Add register and/or stack parameter(s). Allocator rets = return_registers;
// Add return location(s).
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
ValueType ret = fsig->GetReturn(i);
locations.AddReturn(rets.Next(ret));
}
Allocator params = parameter_registers; Allocator params = parameter_registers;
// The wasm_context. // Add parameter for the wasm_context.
locations.AddParam(params.Next(MachineType::PointerRepresentation())); locations.AddParam(params.Next(MachineType::PointerRepresentation()));
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count()); const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) { for (int i = 0; i < parameter_count; i++) {
ValueType param = fsig->GetParam(i); ValueType param = fsig->GetParam(i);
auto l = params.Next(param); locations.AddParam(params.Next(param));
locations.AddParam(l);
}
// Add return location(s).
Allocator rets = return_registers;
rets.AdjustStackOffset(params.stack_offset);
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
ValueType ret = fsig->GetReturn(i);
auto l = rets.Next(ret);
locations.AddReturn(l);
} }
const RegList kCalleeSaveRegisters = 0; const RegList kCalleeSaveRegisters = 0;
...@@ -275,9 +270,7 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) { ...@@ -275,9 +270,7 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
kCalleeSaveRegisters, // callee-saved registers kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs kCalleeSaveFPRegisters, // callee-saved fp regs
flags, // flags flags, // flags
"wasm-call", // debug name "wasm-call");
0, // allocatable registers
rets.stack_offset - params.stack_offset); // stack_return_count
} }
CallDescriptor* ReplaceTypeInCallDescriptorWith( CallDescriptor* ReplaceTypeInCallDescriptorWith(
...@@ -302,20 +295,8 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith( ...@@ -302,20 +295,8 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
LocationSignature::Builder locations(zone, return_count, parameter_count); LocationSignature::Builder locations(zone, return_count, parameter_count);
Allocator params = parameter_registers;
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
if (descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddParam(params.Next(output_type));
}
} else {
locations.AddParam(
params.Next(descriptor->GetParameterType(i).representation()));
}
}
Allocator rets = return_registers; Allocator rets = return_registers;
rets.AdjustStackOffset(params.stack_offset);
for (size_t i = 0; i < descriptor->ReturnCount(); i++) { for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
if (descriptor->GetReturnType(i) == input_type) { if (descriptor->GetReturnType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) { for (size_t j = 0; j < num_replacements; j++) {
...@@ -327,6 +308,19 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith( ...@@ -327,6 +308,19 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
} }
} }
Allocator params = parameter_registers;
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
if (descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
locations.AddParam(params.Next(output_type));
}
} else {
locations.AddParam(
params.Next(descriptor->GetParameterType(i).representation()));
}
}
return new (zone) CallDescriptor( // -- return new (zone) CallDescriptor( // --
descriptor->kind(), // kind descriptor->kind(), // kind
descriptor->GetInputType(0), // target MachineType descriptor->GetInputType(0), // target MachineType
...@@ -337,9 +331,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith( ...@@ -337,9 +331,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
descriptor->CalleeSavedRegisters(), // callee-saved registers descriptor->CalleeSavedRegisters(), // callee-saved registers
descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
descriptor->flags(), // flags descriptor->flags(), // flags
descriptor->debug_name(), // debug name descriptor->debug_name());
descriptor->AllocatableRegisters(), // allocatable registers
rets.stack_offset - params.stack_offset); // stack_return_count
} }
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone, CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
......
...@@ -2231,7 +2231,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2231,7 +2231,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
case kX64Poke: { case kX64Poke: {
int slot = MiscField::decode(instr->opcode()); int const slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) { if (HasImmediateInput(instr, 0)) {
__ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0)); __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
} else { } else {
...@@ -2239,27 +2239,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2239,27 +2239,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
break; break;
} }
case kX64PeekFloat32: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ Movss(i.OutputFloatRegister(), Operand(rbp, offset));
break;
}
case kX64PeekFloat64: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ Movsd(i.OutputDoubleRegister(), Operand(rbp, offset));
break;
}
case kX64Peek: {
int reverse_slot = MiscField::decode(instr->opcode());
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
__ movq(i.OutputRegister(), Operand(rbp, offset));
break;
}
case kX64I32x4Splat: { case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0)); __ movd(dst, i.InputRegister(0));
...@@ -3149,10 +3128,9 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3149,10 +3128,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ bind(&done); __ bind(&done);
} }
// Skip callee-saved and return slots, which are created below. // Skip callee-saved slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves); shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= base::bits::CountPopulation(saves_fp); shrink_slots -= base::bits::CountPopulation(saves_fp);
shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) { if (shrink_slots > 0) {
__ subq(rsp, Immediate(shrink_slots * kPointerSize)); __ subq(rsp, Immediate(shrink_slots * kPointerSize));
} }
...@@ -3179,11 +3157,6 @@ void CodeGenerator::AssembleConstructFrame() { ...@@ -3179,11 +3157,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(Register::from_code(i)); __ pushq(Register::from_code(i));
} }
} }
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
__ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
}
} }
void CodeGenerator::AssembleReturn(InstructionOperand* pop) { void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
...@@ -3192,10 +3165,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) { ...@@ -3192,10 +3165,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers. // Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters(); const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) { if (saves != 0) {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
__ addq(rsp, Immediate(returns * kPointerSize));
}
for (int i = 0; i < Register::kNumRegisters; i++) { for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue; if (!((1 << i) & saves)) continue;
__ popq(Register::from_code(i)); __ popq(Register::from_code(i));
......
...@@ -143,9 +143,6 @@ namespace compiler { ...@@ -143,9 +143,6 @@ namespace compiler {
V(X64Inc32) \ V(X64Inc32) \
V(X64Push) \ V(X64Push) \
V(X64Poke) \ V(X64Poke) \
V(X64Peek) \
V(X64PeekFloat32) \
V(X64PeekFloat64) \
V(X64StackCheck) \ V(X64StackCheck) \
V(X64I32x4Splat) \ V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \ V(X64I32x4ExtractLane) \
......
...@@ -240,9 +240,6 @@ int InstructionScheduler::GetTargetInstructionFlags( ...@@ -240,9 +240,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect; return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck: case kX64StackCheck:
case kX64Peek:
case kX64PeekFloat32:
case kX64PeekFloat64:
return kIsLoadOperation; return kIsLoadOperation;
case kX64Push: case kX64Push:
......
...@@ -1538,11 +1538,11 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1538,11 +1538,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments. // Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) { for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n]; PushParameter input = (*arguments)[n];
if (input.node) { if (input.node()) {
int slot = static_cast<int>(n); int slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(input.node) InstructionOperand value = g.CanBeImmediate(input.node())
? g.UseImmediate(input.node) ? g.UseImmediate(input.node())
: g.UseRegister(input.node); : g.UseRegister(input.node());
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value); Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
} }
} }
...@@ -1550,56 +1550,31 @@ void InstructionSelector::EmitPrepareArguments( ...@@ -1550,56 +1550,31 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments. // Push any stack arguments.
int effect_level = GetEffectLevel(node); int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) { for (PushParameter input : base::Reversed(*arguments)) {
if (g.CanBeImmediate(input.node)) { Node* input_node = input.node();
Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node)); if (g.CanBeImmediate(input_node)) {
Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
} else if (IsSupported(ATOM) || } else if (IsSupported(ATOM) ||
sequence()->IsFP(GetVirtualRegister(input.node))) { sequence()->IsFP(GetVirtualRegister(input_node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves // TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots. // because there is no way to encode fixed double slots.
Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node)); Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
} else if (g.CanBeMemoryOperand(kX64Push, node, input.node, } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
effect_level)) { effect_level)) {
InstructionOperand outputs[1]; InstructionOperand outputs[1];
InstructionOperand inputs[4]; InstructionOperand inputs[4];
size_t input_count = 0; size_t input_count = 0;
InstructionCode opcode = kX64Push; InstructionCode opcode = kX64Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand( AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
input.node, inputs, &input_count); input_node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode); opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs); Emit(opcode, 0, outputs, input_count, inputs);
} else { } else {
Emit(kX64Push, g.NoOutput(), g.Use(input.node)); Emit(kX64Push, g.NoOutput(), g.Use(input_node));
} }
} }
} }
} }
void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
const CallDescriptor* descriptor,
Node* node) {
X64OperandGenerator g(this);
int reverse_slot = 0;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
reverse_slot += output.location.GetSizeInPointers();
// Skip any alignment holes in nodes.
if (output.node == nullptr) continue;
DCHECK(!descriptor->IsCFunctionCall());
if (output.location.GetType() == MachineType::Float32()) {
MarkAsFloat32(output.node);
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kX64PeekFloat32 | MiscField::encode(reverse_slot), result);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kX64PeekFloat64 | MiscField::encode(reverse_slot), result);
} else {
InstructionOperand result = g.DefineAsRegister(output.node);
Emit(kX64Peek | MiscField::encode(reverse_slot), result);
}
}
}
bool InstructionSelector::IsTailCallAddressImmediate() { return true; } bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
......
...@@ -157,18 +157,6 @@ ...@@ -157,18 +157,6 @@
'test-api/Float64Array': [SKIP], 'test-api/Float64Array': [SKIP],
}], # 'arch == arm64 and mode == debug and simulator_run' }], # 'arch == arm64 and mode == debug and simulator_run'
##############################################################################
# TODO(ahaas): Port multiple retrn values to ARM and MIPS
['arch == arm or arch == arm64 or arch == mips or arch == mips64 or arch == mipsel or arch == mips64el', {
'test-multiple-return/*': [SKIP],
}],
['system == windows and arch == x64', {
'test-multiple-return/ReturnMultipleInt32': [SKIP],
'test-multiple-return/ReturnMultipleInt64': [SKIP],
'test-multiple-return/ReturnMultipleFloat32': [SKIP],
'test-multiple-return/ReturnMultipleFloat64': [SKIP],
}],
############################################################################## ##############################################################################
['asan == True', { ['asan == True', {
# Skip tests not suitable for ASAN. # Skip tests not suitable for ASAN.
......
...@@ -198,12 +198,6 @@ ...@@ -198,12 +198,6 @@
'asm/embenchen/lua_binarytrees': [SKIP], 'asm/embenchen/lua_binarytrees': [SKIP],
}], # novfp3 == True }], # novfp3 == True
##############################################################################
# TODO(ahaas): Port multiple return values to ARM and MIPS
['arch == arm or arch == arm64 or arch == mips or arch == mips64 or arch == mipsel or arch == mips64el', {
'wasm/multi-value': [SKIP],
}],
############################################################################## ##############################################################################
['gc_stress == True', { ['gc_stress == True', {
# Skip tests not suitable for GC stress. # Skip tests not suitable for GC stress.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment