Commit 8798b3ef authored by Bill Budge, committed by Commit Bot

[compiler][ia32][arm] Fix pushing of arguments

- Fixes some incorrect assumptions about padding in the
  code generation. Slots may have apparent extra padding
  when allocation fragments go unused.
- Reworks 32 bit push code to simplify skipping slot gaps
  when 'push' instructions are used.
- Adds a ElementSizeInPointers function on machine
  representations.

Bug: chromium:1171759,v8:9198

Change-Id: I029e300fa9c306d7e35344576fd1c68857cf2bca
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2660379
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72502}
parent c781da6c
...@@ -324,6 +324,12 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeInBytes( ...@@ -324,6 +324,12 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeInBytes(
return 1 << ElementSizeLog2Of(rep); return 1 << ElementSizeLog2Of(rep);
} }
// Returns the number of system-pointer-sized stack slots required to hold
// one element of the given representation, rounding up (e.g. a 16-byte
// Simd128 value occupies 4 slots on a 32-bit target).
V8_EXPORT_PRIVATE inline constexpr int ElementSizeInPointers(
MachineRepresentation rep) {
int byte_size = ElementSizeInBytes(rep);
return (byte_size + kSystemPointerSize - 1) / kSystemPointerSize;
}
// Converts representation to bit for representation masks. // Converts representation to bit for representation masks.
V8_EXPORT_PRIVATE inline constexpr int RepresentationBit( V8_EXPORT_PRIVATE inline constexpr int RepresentationBit(
MachineRepresentation rep) { MachineRepresentation rep) {
......
...@@ -1763,48 +1763,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1763,48 +1763,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kArmPush: { case kArmPush: {
int stack_decrement = i.InputInt32(0); int stack_decrement = i.InputInt32(0);
if (instr->InputAt(1)->IsFPRegister()) { int slots = stack_decrement / kSystemPointerSize;
LocationOperand* op = LocationOperand::cast(instr->InputAt(1)); LocationOperand* op = LocationOperand::cast(instr->InputAt(1));
switch (op->representation()) { MachineRepresentation rep = op->representation();
case MachineRepresentation::kFloat32: int pushed_slots = ElementSizeInPointers(rep);
// 1 slot values are never padded. // Slot-sized arguments are never padded but there may be a gap if
DCHECK_EQ(stack_decrement, kSystemPointerSize); // the slot allocator reclaimed other padding slots. Adjust the stack
__ vpush(i.InputFloatRegister(1)); // here to skip any gap.
frame_access_state()->IncreaseSPDelta(1); if (slots > pushed_slots) {
break; __ AllocateStackSpace(slots - pushed_slots);
case MachineRepresentation::kFloat64: }
// 2 slot values have up to 1 slot of padding. switch (rep) {
DCHECK_GE(stack_decrement, kDoubleSize); case MachineRepresentation::kFloat32:
if (stack_decrement > kDoubleSize) { __ vpush(i.InputFloatRegister(1));
DCHECK_EQ(stack_decrement, kDoubleSize + kSystemPointerSize); break;
__ AllocateStackSpace(kSystemPointerSize); case MachineRepresentation::kFloat64:
} __ vpush(i.InputDoubleRegister(1));
__ vpush(i.InputDoubleRegister(1)); break;
frame_access_state()->IncreaseSPDelta(stack_decrement / case MachineRepresentation::kSimd128:
kSystemPointerSize); __ vpush(i.InputSimd128Register(1));
break; break;
case MachineRepresentation::kSimd128: { default:
// 4 slot values have up to 3 slots of padding. __ push(i.InputRegister(1));
DCHECK_GE(stack_decrement, kSimd128Size); break;
if (stack_decrement > kSimd128Size) {
int padding = stack_decrement - kSimd128Size;
DCHECK_LT(padding, kSimd128Size);
__ AllocateStackSpace(padding);
}
__ vpush(i.InputSimd128Register(1));
frame_access_state()->IncreaseSPDelta(stack_decrement /
kSystemPointerSize);
break;
}
default:
UNREACHABLE();
break;
}
} else {
DCHECK_EQ(stack_decrement, kSystemPointerSize);
__ push(i.InputRegister(1));
frame_access_state()->IncreaseSPDelta(1);
} }
frame_access_state()->IncreaseSPDelta(slots);
DCHECK_EQ(LeaveCC, i.OutputSBit()); DCHECK_EQ(LeaveCC, i.OutputSBit());
break; break;
} }
......
...@@ -1783,28 +1783,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1783,28 +1783,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kIA32PushFloat32: { case kIA32PushFloat32: {
// 1 slot values are never padded. int stack_decrement = i.InputInt32(0);
DCHECK_EQ(i.InputInt32(0), kFloatSize);
if (instr->InputAt(1)->IsFPRegister()) { if (instr->InputAt(1)->IsFPRegister()) {
__ AllocateStackSpace(kFloatSize); __ AllocateStackSpace(stack_decrement);
__ Movss(Operand(esp, 0), i.InputDoubleRegister(1)); __ Movss(Operand(esp, 0), i.InputDoubleRegister(1));
} else if (HasImmediateInput(instr, 1)) { } else if (HasImmediateInput(instr, 1)) {
__ AllocateStackSpace(kFloatSize);
__ Move(kScratchDoubleReg, i.InputFloat32(1)); __ Move(kScratchDoubleReg, i.InputFloat32(1));
__ AllocateStackSpace(stack_decrement);
__ Movss(Operand(esp, 0), kScratchDoubleReg); __ Movss(Operand(esp, 0), kScratchDoubleReg);
} else { } else {
__ Movss(kScratchDoubleReg, i.InputOperand(1)); __ Movss(kScratchDoubleReg, i.InputOperand(1));
__ AllocateStackSpace(kFloatSize); __ AllocateStackSpace(stack_decrement);
__ Movss(Operand(esp, 0), kScratchDoubleReg); __ Movss(Operand(esp, 0), kScratchDoubleReg);
} }
int slots = kFloatSize / kSystemPointerSize; int slots = stack_decrement / kSystemPointerSize;
frame_access_state()->IncreaseSPDelta(slots); frame_access_state()->IncreaseSPDelta(slots);
break; break;
} }
case kIA32PushFloat64: { case kIA32PushFloat64: {
int stack_decrement = i.InputInt32(0); int stack_decrement = i.InputInt32(0);
// 2 slot values have up to 1 slot of padding.
DCHECK_GE(stack_decrement, kDoubleSize);
if (instr->InputAt(1)->IsFPRegister()) { if (instr->InputAt(1)->IsFPRegister()) {
__ AllocateStackSpace(stack_decrement); __ AllocateStackSpace(stack_decrement);
__ Movsd(Operand(esp, 0), i.InputDoubleRegister(1)); __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
...@@ -1823,14 +1820,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1823,14 +1820,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kIA32PushSimd128: { case kIA32PushSimd128: {
int stack_decrement = i.InputInt32(0); int stack_decrement = i.InputInt32(0);
// 4 slot values have up to 3 slots of padding.
DCHECK_GE(stack_decrement, kSimd128Size);
if (instr->InputAt(1)->IsFPRegister()) { if (instr->InputAt(1)->IsFPRegister()) {
__ AllocateStackSpace(stack_decrement); __ AllocateStackSpace(stack_decrement);
// TODO(bbudge) Use Movaps when slots are aligned.
__ Movups(Operand(esp, 0), i.InputSimd128Register(1)); __ Movups(Operand(esp, 0), i.InputSimd128Register(1));
} else { } else {
__ Movups(kScratchDoubleReg, i.InputOperand(1)); __ Movups(kScratchDoubleReg, i.InputOperand(1));
__ AllocateStackSpace(stack_decrement); __ AllocateStackSpace(stack_decrement);
// TODO(bbudge) Use Movaps when slots are aligned.
__ Movups(Operand(esp, 0), kScratchDoubleReg); __ Movups(Operand(esp, 0), kScratchDoubleReg);
} }
int slots = stack_decrement / kSystemPointerSize; int slots = stack_decrement / kSystemPointerSize;
...@@ -1839,21 +1836,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1839,21 +1836,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kIA32Push: { case kIA32Push: {
// TODO(bbudge) Merge the push opcodes into a single one, as on x64. // TODO(bbudge) Merge the push opcodes into a single one, as on x64.
// 1 slot values are never padded. int stack_decrement = i.InputInt32(0);
DCHECK_EQ(i.InputInt32(0), kSystemPointerSize); if (instr->InputAt(1)->IsFPRegister()) {
if (HasAddressingMode(instr)) { __ AllocateStackSpace(stack_decrement);
size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
} else if (instr->InputAt(1)->IsFPRegister()) {
__ AllocateStackSpace(kSystemPointerSize);
__ Movsd(Operand(esp, 0), i.InputDoubleRegister(1)); __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
} else if (HasImmediateInput(instr, 1)) {
__ push(i.InputImmediate(1));
} else { } else {
__ push(i.InputOperand(1)); // Slot-sized arguments are never padded but there may be a gap if
// the slot allocator reclaimed other padding slots. Adjust the stack
// here to skip any gap.
if (stack_decrement > kSystemPointerSize) {
__ AllocateStackSpace(stack_decrement - kSystemPointerSize);
}
if (HasAddressingMode(instr)) {
size_t index = 1;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
} else if (HasImmediateInput(instr, 1)) {
__ push(i.InputImmediate(1));
} else {
__ push(i.InputOperand(1));
}
} }
frame_access_state()->IncreaseSPDelta(1); int slots = stack_decrement / kSystemPointerSize;
frame_access_state()->IncreaseSPDelta(slots);
break; break;
} }
case kIA32Poke: { case kIA32Poke: {
......
...@@ -10,13 +10,14 @@ ...@@ -10,13 +10,14 @@
#include "src/compiler/backend/code-generator.h" #include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h" #include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h" #include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/execution/isolate.h" #include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h" #include "src/objects/heap-number-inl.h"
#include "src/objects/objects-inl.h" #include "src/objects/objects-inl.h"
#include "src/objects/smi.h" #include "src/objects/smi.h"
#include "test/cctest/cctest.h" #include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h" #include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h" #include "test/cctest/compiler/function-tester.h"
namespace v8 { namespace v8 {
...@@ -1430,6 +1431,106 @@ TEST(AssembleTailCallGap) { ...@@ -1430,6 +1431,106 @@ TEST(AssembleTailCallGap) {
} }
} }
namespace {
// Creates a minimal wasm NativeModule to host test-generated code.
// `code_size` is the amount of code space the module must be able to hold.
// Needed because WasmCallDescriptor-based calls expect the callee's code to
// live on the native (wasm) heap rather than inside a Code object.
std::shared_ptr<wasm::NativeModule> AllocateNativeModule(Isolate* isolate,
size_t code_size) {
std::shared_ptr<wasm::WasmModule> module(new wasm::WasmModule());
// Declare a single function so the module can accept one code object.
module->num_declared_functions = 1;
// We have to add the code object to a NativeModule, because the
// WasmCallDescriptor assumes that code is on the native heap and not
// within a code object.
auto native_module = isolate->wasm_engine()->NewNativeModule(
isolate, wasm::WasmFeatures::All(), std::move(module), code_size);
// No wire bytes are needed for a synthetic test module.
native_module->SetWireBytes({});
return native_module;
}
} // namespace
// Test stack argument pushing with some gaps that require stack pointer
// adjustment (regression test for chromium:1171759).
TEST(Regress_1171759) {
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
// Create a minimal callee with enough parameters to exhaust parameter
// registers and force some stack parameters.
constexpr int kDoubleParams = 16;
// These are followed by a single, and another double to create a gap.
constexpr int kTotalParams = kDoubleParams + 2;
wasm::FunctionSig::Builder builder(&zone, 1, kTotalParams);
// Make the first parameter slots double width, to use up the FP argument
// registers and spill subsequent parameters to stack slots.
for (int i = 0; i < kDoubleParams; i++) {
builder.AddParam(wasm::ValueType::For(MachineType::Float64()));
}
// Allocate a single-slot (Float32) parameter.
builder.AddParam(wasm::ValueType::For(MachineType::Float32()));
// Allocate a double parameter which should create a stack gap: the
// following double needs alignment, leaving a padding slot after the
// single-slot argument above.
builder.AddParam(wasm::ValueType::For(MachineType::Float64()));
builder.AddReturn(wasm::ValueType::For(MachineType::Int32()));
CallDescriptor* desc =
compiler::GetWasmCallDescriptor(&zone, builder.Build());
HandleAndZoneScope handles(kCompressGraphZone);
// The callee only returns 0; the interesting code is the caller's pushes.
RawMachineAssembler m(handles.main_isolate(),
handles.main_zone()->New<Graph>(handles.main_zone()),
desc, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags());
m.Return(m.Int32Constant(0));
OptimizedCompilationInfo info(ArrayVector("testing"), handles.main_zone(),
CodeKind::WASM_FUNCTION);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(
&info, handles.main_isolate(), desc, m.graph(),
AssemblerOptions::Default(handles.main_isolate()), m.ExportForTest())
.ToHandleChecked();
// Move the callee's code into a NativeModule so it is callable through the
// wasm call descriptor (see AllocateNativeModule above).
std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
// Generate a minimal calling function, to push stack arguments.
RawMachineAssemblerTester<int32_t> mt;
Node* function = mt.PointerConstant(code_start);
Node* dummy_context = mt.PointerConstant(nullptr);
Node* double_slot = mt.Float64Constant(0);
Node* single_slot_that_creates_gap = mt.Float32Constant(0);
// 16 doubles, one single, then a final double — matching the signature
// built above, so the last push must skip the padding slot.
Node* call_inputs[] = {function,
dummy_context,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
double_slot,
single_slot_that_creates_gap,
double_slot};
Node* call =
mt.AddNode(mt.common()->Call(desc), 2 + kTotalParams, call_inputs);
mt.Return(call);
// If argument pushing mishandles the gap, the call crashes or corrupts the
// stack; success returns the callee's constant 0.
CHECK_EQ(0, mt.Call());
}
} // namespace compiler } // namespace compiler
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment