Commit 63c5dd5d authored by machenbach, committed by Commit bot

Revert of [turbofan] Add alignment parameter to StackSlot operator (patchset...

Revert of [turbofan] Add alignment parameter to StackSlot operator (patchset #7 id:120001 of https://codereview.chromium.org/2816743003/ )

Reason for revert:
Seems to break cfi:
https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20-%20cfi/builds/9989

Original issue's description:
> [turbofan] Add alignment parameter to StackSlot operator
>
> If alignment parameter is set, the memory returned by the
> StackSlot operator will be aligned according to the parameter.
>
> The implementation goes like this. If alignment parameter is set
> we allocate a bit more memory than actually needed and so we
> can move the beginning of the StackSlot in order to have it aligned.
>
>
> BUG=
>
> Review-Url: https://codereview.chromium.org/2816743003
> Cr-Commit-Position: refs/heads/master@{#45197}
> Committed: https://chromium.googlesource.com/v8/v8/+/d8bfdb7a998adadc56aa5705a5998e75ceae7675

TBR=ahaas@chromium.org,clemensh@chromium.org,titzer@chromium.org,bmeurer@chromium.org,ivica.bogosavljevic@imgtec.com
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review-Url: https://codereview.chromium.org/2867403002
Cr-Commit-Position: refs/heads/master@{#45203}
parent 4a4699ed
......@@ -419,14 +419,6 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......
......@@ -527,15 +527,6 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
ImmediateMode immediate_mode, MachineRepresentation rep,
Node* output = nullptr) {
......
......@@ -111,18 +111,9 @@ class Frame : public ZoneObject {
frame_slot_count_ += count;
}
int AllocateSpillSlot(int width, int alignment = 0) {
int AllocateSpillSlot(int width) {
int frame_slot_count_before = frame_slot_count_;
if (alignment <= kPointerSize) {
AllocateAlignedFrameSlots(width);
} else {
// We need to allocate more space for the spill slot
// in case we need an aligned spill slot, to be
// able to properly align the start of the spill slot
// and still have enough space to hold all the
// data
AllocateAlignedFrameSlots(width + alignment - kPointerSize);
}
AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
return frame_slot_count_ - 1;
}
......
......@@ -212,14 +212,6 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......
......@@ -1900,6 +1900,14 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
void InstructionSelector::VisitStackSlot(Node* node) {
int size = StackSlotSizeOf(node->op());
int slot = frame_->AllocateSpillSlot(size);
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
EmitIdentity(node);
......
......@@ -70,25 +70,9 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
bool operator!=(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return !(lhs == rhs);
}
size_t hash_value(StackSlotRepresentation rep) {
return base::hash_combine(rep.size(), rep.alignment());
}
std::ostream& operator<<(std::ostream& os, StackSlotRepresentation rep) {
return os << "(" << rep.size() << " : " << rep.alignment() << ")";
}
StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
int StackSlotSizeOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
return OpParameter<StackSlotRepresentation>(op);
return OpParameter<int>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
......@@ -439,15 +423,13 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(16x8, 16) \
V(8x16, 8)
#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
explicit StackSlotOperator(int size, int alignment)
: Operator1<StackSlotRepresentation>(
IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
"StackSlot", 0, 0, 0, 1, 0, 0,
StackSlotRepresentation(size, alignment)) {}
struct StackSlotOperator : public Operator1<int> {
explicit StackSlotOperator(int size)
: Operator1<int>(IrOpcode::kStackSlot,
Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
0, 0, 1, 0, 0, size) {}
};
struct MachineOperatorGlobalCache {
......@@ -514,15 +496,12 @@ struct MachineOperatorGlobalCache {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
#define STACKSLOT(Size, Alignment) \
struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
: public StackSlotOperator { \
StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
: StackSlotOperator(Size, Alignment) {} \
}; \
StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
kStackSlotOfSize##Size##OfAlignment##Alignment;
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
#define STACKSLOT(Size) \
struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {} \
}; \
StackSlotOfSize##Size##Operator kStackSlotSize##Size;
STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
#undef STACKSLOT
#define STORE(Type) \
......@@ -773,23 +752,21 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
return nullptr;
}
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
const Operator* MachineOperatorBuilder::StackSlot(int size) {
DCHECK_LE(0, size);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
#define CASE_CACHED_SIZE(Size, Alignment) \
if (size == Size && alignment == Alignment) { \
return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
#define CASE_CACHED_SIZE(Size) \
case Size: \
return &cache_.kStackSlotSize##Size;
switch (size) {
STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
default:
return new (zone_) StackSlotOperator(size);
}
STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
#undef CASE_CACHED_SIZE
return new (zone_) StackSlotOperator(size, alignment);
}
const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
int alignment) {
return StackSlot(1 << ElementSizeLog2Of(rep), alignment);
const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
return StackSlot(1 << ElementSizeLog2Of(rep));
}
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
......
......@@ -93,29 +93,7 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
: size_(size), alignment_(alignment) {}
int size() const { return size_; }
int alignment() const { return alignment_; }
private:
int size_;
int alignment_;
};
V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation,
StackSlotRepresentation);
bool operator!=(StackSlotRepresentation, StackSlotRepresentation);
size_t hash_value(StackSlotRepresentation);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
StackSlotRepresentation);
StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op);
int StackSlotSizeOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
......@@ -621,8 +599,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
const Operator* StackSlot(int size);
const Operator* StackSlot(MachineRepresentation rep);
// Access to the machine stack.
const Operator* LoadStackPointer();
......
......@@ -775,35 +775,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
Register base_reg = offset.from_stack_pointer() ? sp : fp;
__ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
int alignment = i.InputInt32(1);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
alignment == 16);
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
__ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
if (alignment == 2 * kPointerSize) {
Label done;
__ Addu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
__ Addu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
__ bind(&done);
} else if (alignment > 2 * kPointerSize) {
Label done;
__ Addu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
__ li(kScratchReg2, alignment);
__ Subu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
__ Addu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
__ bind(&done);
}
__ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
Operand(offset.offset()));
break;
}
case kIeee754Float64Acos:
......
......@@ -256,16 +256,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int alignment = rep.alignment();
int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)),
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......
......@@ -813,35 +813,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
Register base_reg = offset.from_stack_pointer() ? sp : fp;
__ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
int alignment = i.InputInt32(1);
DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
alignment == 16);
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
__ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
if (alignment == 2 * kPointerSize) {
Label done;
__ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
__ Daddu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
__ bind(&done);
} else if (alignment > 2 * kPointerSize) {
Label done;
__ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
__ And(kScratchReg, kScratchReg, Operand(alignment - 1));
__ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
__ li(kScratchReg2, alignment);
__ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
__ Daddu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
__ bind(&done);
}
__ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
Operand(offset.offset()));
break;
}
case kIeee754Float64Acos:
......
......@@ -352,17 +352,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int alignment = rep.alignment();
int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)),
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
Node* output = nullptr) {
Mips64OperandGenerator g(selector);
......
......@@ -174,14 +174,6 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......
......@@ -84,8 +84,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
Node* StackSlot(MachineRepresentation rep, int alignment = 0) {
return AddNode(machine()->StackSlot(rep, alignment));
Node* StackSlot(MachineRepresentation rep) {
return AddNode(machine()->StackSlot(rep));
}
Node* Int64Constant(int64_t value) {
return AddNode(common()->Int64Constant(value));
......
......@@ -702,15 +702,6 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
ArchOpcode opcode = SelectLoadOpcode(node);
......
......@@ -283,15 +283,6 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
......
......@@ -168,14 +168,6 @@ class X87OperandGenerator final : public OperandGenerator {
}
};
void InstructionSelector::VisitStackSlot(Node* node) {
StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
int slot = frame_->AllocateSpillSlot(rep.size());
OperandGenerator g(this);
Emit(kArchStackSlot, g.DefineAsRegister(node),
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
......
......@@ -6739,34 +6739,6 @@ TEST(ParentFramePointer) {
CHECK_EQ(1, r.Call(1));
}
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
TEST(StackSlotAlignment) {
RawMachineAssemblerTester<int32_t> r;
RawMachineLabel tlabel;
RawMachineLabel flabel;
RawMachineLabel merge;
int alignments[] = {4, 8, 16};
int alignment_count = arraysize(alignments);
Node* alignment_counter = r.Int32Constant(0);
for (int i = 0; i < alignment_count; i++) {
for (int j = 0; j < 5; j++) {
Node* stack_slot =
r.StackSlot(MachineRepresentation::kWord32, alignments[i]);
alignment_counter = r.Int32Add(
alignment_counter,
r.Word32And(stack_slot, r.Int32Constant(alignments[i] - 1)));
}
}
r.Return(alignment_counter);
CHECK_EQ(0, r.Call(1));
}
#endif // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
#if V8_TARGET_ARCH_64_BIT
TEST(Regression5923) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment