Commit 948ce214 authored by bmeurer@chromium.org's avatar bmeurer@chromium.org

[turbofan] First step towards correctified 64-bit addressing.

Also remove the LEA matching from x64, since it was never really
effective. We'll optimize that once we're correct.

TEST=cctest,unittests
R=dcarney@chromium.org

Review URL: https://codereview.chromium.org/652363006

Cr-Commit-Position: refs/heads/master@{#25024}
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@25024 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent bd5c9834
...@@ -550,7 +550,6 @@ source_set("v8_base") { ...@@ -550,7 +550,6 @@ source_set("v8_base") {
"src/compiler/node-aux-data.h", "src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc", "src/compiler/node-cache.cc",
"src/compiler/node-cache.h", "src/compiler/node-cache.h",
"src/compiler/node-matchers.cc",
"src/compiler/node-matchers.h", "src/compiler/node-matchers.h",
"src/compiler/node-properties-inl.h", "src/compiler/node-properties-inl.h",
"src/compiler/node-properties.h", "src/compiler/node-properties.h",
......
...@@ -46,14 +46,14 @@ Node* ChangeLowering::HeapNumberValueIndexConstant() { ...@@ -46,14 +46,14 @@ Node* ChangeLowering::HeapNumberValueIndexConstant() {
STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0); STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
const int heap_number_value_offset = const int heap_number_value_offset =
((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4)); ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag); return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
} }
Node* ChangeLowering::SmiMaxValueConstant() { Node* ChangeLowering::SmiMaxValueConstant() {
const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize() const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
: SmiTagging<8>::SmiValueSize(); : SmiTagging<8>::SmiValueSize();
return jsgraph()->Int32Constant( return jsgraph()->IntPtrConstant(
-(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1)); -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
} }
...@@ -61,7 +61,7 @@ Node* ChangeLowering::SmiMaxValueConstant() { ...@@ -61,7 +61,7 @@ Node* ChangeLowering::SmiMaxValueConstant() {
Node* ChangeLowering::SmiShiftBitsConstant() { Node* ChangeLowering::SmiShiftBitsConstant() {
const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize() const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
: SmiTagging<8>::SmiShiftSize(); : SmiTagging<8>::SmiShiftSize();
return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize); return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
} }
...@@ -166,7 +166,7 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control, ...@@ -166,7 +166,7 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
STATIC_ASSERT(kSmiTagMask == 1); STATIC_ASSERT(kSmiTagMask == 1);
Node* tag = graph()->NewNode(machine()->WordAnd(), val, Node* tag = graph()->NewNode(machine()->WordAnd(), val,
jsgraph()->Int32Constant(kSmiTagMask)); jsgraph()->IntPtrConstant(kSmiTagMask));
Node* branch = graph()->NewNode(common()->Branch(), tag, control); Node* branch = graph()->NewNode(common()->Branch(), tag, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch); Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
...@@ -192,7 +192,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) { ...@@ -192,7 +192,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
STATIC_ASSERT(kSmiTagMask == 1); STATIC_ASSERT(kSmiTagMask == 1);
Node* tag = graph()->NewNode(machine()->WordAnd(), val, Node* tag = graph()->NewNode(machine()->WordAnd(), val,
jsgraph()->Int32Constant(kSmiTagMask)); jsgraph()->IntPtrConstant(kSmiTagMask));
Node* branch = graph()->NewNode(common()->Branch(), tag, control); Node* branch = graph()->NewNode(common()->Branch(), tag, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch); Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
......
...@@ -53,6 +53,178 @@ static AddressingMode AdjustAddressingMode(AddressingMode base_mode, ...@@ -53,6 +53,178 @@ static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
} }
// Fairly intel-specific node matcher used for matching scale factors in
// addressing modes (the scales expressible in an x86 SIB byte).
// Matches nodes of form [x * N] for N in {1,2,4,8}.
class ScaleFactorMatcher : public NodeMatcher {
 public:
  // The set of multiplier constants this matcher recognizes.
  static const int kMatchedFactors[4];
  explicit ScaleFactorMatcher(Node* node);
  // True iff the node had the form [x * N] for a recognized N.
  bool Matches() const { return left_ != NULL; }
  // log2 of the matched scale factor N; only valid when Matches().
  int Power() const {
    DCHECK(Matches());
    return power_;
  }
  // The non-constant operand x; only valid when Matches().
  Node* Left() const {
    DCHECK(Matches());
    return left_;
  }

 private:
  Node* left_;  // NULL until a successful match.
  int power_;   // log2 of the scale factor.
};
// Fairly intel-specific node matcher used for matching index and displacement
// operands in addressing modes.
// Matches nodes of form:
//  [x * N]
//  [x * N + K]
//  [x + K]
//  [x] -- fallback case
// for N in {1,2,4,8} and K int32_t.
class IndexAndDisplacementMatcher : public NodeMatcher {
 public:
  explicit IndexAndDisplacementMatcher(Node* node);
  // The index operand x; never NULL (falls back to the matched node itself).
  Node* index_node() const { return index_node_; }
  // The constant displacement K, or 0 when none was matched.
  int displacement() const { return displacement_; }
  // log2 of the scale factor N, or 0 when no scale was matched.
  int power() const { return power_; }

 private:
  Node* index_node_;
  int displacement_;
  int power_;
};
// Fairly intel-specific node matcher used for matching multiplies that can be
// transformed to lea instructions.
// Matches nodes of form:
//  [x * N]
// for N in {1,2,3,4,5,8,9}, i.e. multipliers expressible as (x << p) [+ x].
class LeaMultiplyMatcher : public NodeMatcher {
 public:
  // The set of multiplier constants this matcher recognizes.
  static const int kMatchedFactors[7];
  explicit LeaMultiplyMatcher(Node* node);
  // True iff the node was a multiply by a recognized constant.
  bool Matches() const { return left_ != NULL; }
  // log2 of the power-of-two component of N; only valid when Matches().
  int Power() const {
    DCHECK(Matches());
    return power_;
  }
  // The non-constant operand x; only valid when Matches().
  Node* Left() const {
    DCHECK(Matches());
    return left_;
  }
  // Displacement will be either 0 or 1 (1 when N is 3, 5 or 9, meaning the
  // lea needs the extra "+ x" term).
  int32_t Displacement() const {
    DCHECK(Matches());
    return displacement_;
  }

 private:
  Node* left_;  // NULL until a successful match.
  int power_;
  int displacement_;
};
const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};

// Recognizes Int32Mul(x, N) for N in {1, 2, 4, 8}; records x and log2(N).
// On any mismatch the matcher stays in the non-matching state (left_ == NULL).
ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
    : NodeMatcher(node), left_(NULL), power_(0) {
  if (opcode() != IrOpcode::kInt32Mul) return;
  // TODO(dcarney): should test 64 bit ints as well.
  Int32BinopMatcher mul(this->node());
  if (!mul.right().HasValue()) return;
  const int32_t factor = mul.right().Value();
  if (factor == 1) {
    power_ = 0;
  } else if (factor == 2) {
    power_ = 1;
  } else if (factor == 4) {
    power_ = 2;
  } else if (factor == 8) {
    power_ = 3;
  } else {
    return;  // Not a representable scale factor.
  }
  left_ = mul.left().node();
}
// Decomposes the node into index, optional constant displacement and optional
// scale: first peels a constant right operand off an Int32Add, then tries to
// peel a {1,2,4,8} multiply off whatever remains.
IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
  if (opcode() == IrOpcode::kInt32Add) {
    Int32BinopMatcher add(this->node());
    if (add.right().HasValue()) {
      index_node_ = add.left().node();
      displacement_ = add.right().Value();
    }
  }
  // What is left may still carry a scale factor.
  ScaleFactorMatcher scale(index_node_);
  if (scale.Matches()) {
    power_ = scale.Power();
    index_node_ = scale.Left();
  }
}
const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};

// Recognizes Int32Mul/Int64Mul(x, N) for N in {1, 2, 3, 4, 5, 8, 9}, i.e. the
// multipliers a single lea can express as (x << Power()) [+ x].
// Displacement() is 1 exactly when N is not a power of two (3, 5, 9).
LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
    return;
  }
  // Extract the constant multiplier, trying the 32-bit matcher first and
  // falling back to the 64-bit one.  Distinct names (m32/m64) avoid the
  // nested-scope shadowing of the original, which -Wshadow flags and which
  // made the fallback branch easy to misread.
  int64_t value = 0;
  Node* left = NULL;
  Int32BinopMatcher m32(this->node());
  if (m32.right().HasValue()) {
    value = m32.right().Value();
    left = m32.left().node();
  } else {
    Int64BinopMatcher m64(this->node());
    if (!m64.right().HasValue()) return;
    value = m64.right().Value();
    left = m64.left().node();
  }
  switch (value) {
    case 9:
    case 8:
      power_++;  // Fall through.
    case 5:
    case 4:
      power_++;  // Fall through.
    case 3:
    case 2:
      power_++;  // Fall through.
    case 1:
      break;
    default:
      return;
  }
  // 3, 5 and 9 need the extra "+ x" term of the lea.
  if (!base::bits::IsPowerOfTwo64(value)) {
    displacement_ = 1;
  }
  left_ = left;
}
class AddressingModeMatcher { class AddressingModeMatcher {
public: public:
AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index) AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
......
...@@ -675,8 +675,10 @@ class Constant FINAL { ...@@ -675,8 +675,10 @@ class Constant FINAL {
Type type() const { return type_; } Type type() const { return type_; }
int32_t ToInt32() const { int32_t ToInt32() const {
DCHECK_EQ(kInt32, type()); DCHECK(type() == kInt32 || type() == kInt64);
return static_cast<int32_t>(value_); const int32_t value = static_cast<int32_t>(value_);
DCHECK_EQ(value_, static_cast<int64_t>(value));
return value;
} }
int64_t ToInt64() const { int64_t ToInt64() const {
......
...@@ -81,26 +81,34 @@ inline MachineType RepresentationOf(MachineType machine_type) { ...@@ -81,26 +81,34 @@ inline MachineType RepresentationOf(MachineType machine_type) {
return static_cast<MachineType>(result); return static_cast<MachineType>(result);
} }
// Gets the element size in bytes of the machine type. // Gets the log2 of the element size in bytes of the machine type.
inline int ElementSizeOf(MachineType machine_type) { inline int ElementSizeLog2Of(MachineType machine_type) {
switch (RepresentationOf(machine_type)) { switch (RepresentationOf(machine_type)) {
case kRepBit: case kRepBit:
case kRepWord8: case kRepWord8:
return 1; return 0;
case kRepWord16: case kRepWord16:
return 2; return 1;
case kRepWord32: case kRepWord32:
case kRepFloat32: case kRepFloat32:
return 4; return 2;
case kRepWord64: case kRepWord64:
case kRepFloat64: case kRepFloat64:
return 8; return 3;
case kRepTagged: case kRepTagged:
return kPointerSize; return kPointerSizeLog2;
default: default:
UNREACHABLE(); break;
return kPointerSize;
} }
UNREACHABLE();
return -1;
}
// Gets the element size in bytes of the machine type.
inline int ElementSizeOf(MachineType machine_type) {
const int shift = ElementSizeLog2Of(machine_type);
DCHECK_NE(-1, shift);
return 1 << shift;
} }
// Describes the inputs and outputs of a function or call. // Describes the inputs and outputs of a function or call.
......
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/generic-node-inl.h"
#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
namespace compiler {
const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};

// Recognizes Int32Mul(x, N) for N in {1, 2, 4, 8}; records x and log2(N).
// On any mismatch the matcher stays in the non-matching state (left_ == NULL).
ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
    : NodeMatcher(node), left_(NULL), power_(0) {
  if (opcode() != IrOpcode::kInt32Mul) return;
  // TODO(dcarney): should test 64 bit ints as well.
  Int32BinopMatcher mul(this->node());
  if (!mul.right().HasValue()) return;
  const int32_t factor = mul.right().Value();
  if (factor == 1) {
    power_ = 0;
  } else if (factor == 2) {
    power_ = 1;
  } else if (factor == 4) {
    power_ = 2;
  } else if (factor == 8) {
    power_ = 3;
  } else {
    return;  // Not a representable scale factor.
  }
  left_ = mul.left().node();
}
// Decomposes the node into index, optional constant displacement and optional
// scale: first peels a constant right operand off an Int32Add, then tries to
// peel a {1,2,4,8} multiply off whatever remains.
IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
  if (opcode() == IrOpcode::kInt32Add) {
    Int32BinopMatcher add(this->node());
    if (add.right().HasValue()) {
      index_node_ = add.left().node();
      displacement_ = add.right().Value();
    }
  }
  // What is left may still carry a scale factor.
  ScaleFactorMatcher scale(index_node_);
  if (scale.Matches()) {
    power_ = scale.Power();
    index_node_ = scale.Left();
  }
}
const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};

// Recognizes Int32Mul/Int64Mul(x, N) for N in {1, 2, 3, 4, 5, 8, 9}, i.e. the
// multipliers a single lea can express as (x << Power()) [+ x].
// Displacement() is 1 exactly when N is not a power of two (3, 5, 9).
LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
    return;
  }
  // Extract the constant multiplier, trying the 32-bit matcher first and
  // falling back to the 64-bit one.  Distinct names (m32/m64) avoid the
  // nested-scope shadowing of the original, which -Wshadow flags and which
  // made the fallback branch easy to misread.
  int64_t value = 0;
  Node* left = NULL;
  Int32BinopMatcher m32(this->node());
  if (m32.right().HasValue()) {
    value = m32.right().Value();
    left = m32.left().node();
  } else {
    Int64BinopMatcher m64(this->node());
    if (!m64.right().HasValue()) return;
    value = m64.right().Value();
    left = m64.left().node();
  }
  switch (value) {
    case 9:
    case 8:
      power_++;  // Fall through.
    case 5:
    case 4:
      power_++;  // Fall through.
    case 3:
    case 2:
      power_++;  // Fall through.
    case 1:
      break;
    default:
      return;
  }
  // 3, 5 and 9 need the extra "+ x" term of the lea.
  if (!base::bits::IsPowerOfTwo64(value)) {
    displacement_ = 1;
  }
  left_ = left;
}
} // namespace compiler
} // namespace internal
} // namespace v8
...@@ -150,88 +150,6 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher; ...@@ -150,88 +150,6 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher; typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher; typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
// Fairly intel-specific node matcher used for matching scale factors in
// addressing modes (the scales expressible in an x86 SIB byte).
// Matches nodes of form [x * N] for N in {1,2,4,8}.
class ScaleFactorMatcher : public NodeMatcher {
 public:
  // The set of multiplier constants this matcher recognizes.
  static const int kMatchedFactors[4];
  explicit ScaleFactorMatcher(Node* node);
  // True iff the node had the form [x * N] for a recognized N.
  bool Matches() const { return left_ != NULL; }
  // log2 of the matched scale factor N; only valid when Matches().
  int Power() const {
    DCHECK(Matches());
    return power_;
  }
  // The non-constant operand x; only valid when Matches().
  Node* Left() const {
    DCHECK(Matches());
    return left_;
  }

 private:
  Node* left_;  // NULL until a successful match.
  int power_;   // log2 of the scale factor.
};
// Fairly intel-specific node matcher used for matching index and displacement
// operands in addressing modes.
// Matches nodes of form:
//  [x * N]
//  [x * N + K]
//  [x + K]
//  [x] -- fallback case
// for N in {1,2,4,8} and K int32_t.
class IndexAndDisplacementMatcher : public NodeMatcher {
 public:
  explicit IndexAndDisplacementMatcher(Node* node);
  // The index operand x; never NULL (falls back to the matched node itself).
  Node* index_node() const { return index_node_; }
  // The constant displacement K, or 0 when none was matched.
  int displacement() const { return displacement_; }
  // log2 of the scale factor N, or 0 when no scale was matched.
  int power() const { return power_; }

 private:
  Node* index_node_;
  int displacement_;
  int power_;
};
// Fairly intel-specific node matcher used for matching multiplies that can be
// transformed to lea instructions.
// Matches nodes of form:
//  [x * N]
// for N in {1,2,3,4,5,8,9}, i.e. multipliers expressible as (x << p) [+ x].
class LeaMultiplyMatcher : public NodeMatcher {
 public:
  // The set of multiplier constants this matcher recognizes.
  static const int kMatchedFactors[7];
  explicit LeaMultiplyMatcher(Node* node);
  // True iff the node was a multiply by a recognized constant.
  bool Matches() const { return left_ != NULL; }
  // log2 of the power-of-two component of N; only valid when Matches().
  int Power() const {
    DCHECK(Matches());
    return power_;
  }
  // The non-constant operand x; only valid when Matches().
  Node* Left() const {
    DCHECK(Matches());
    return left_;
  }
  // Displacement will be either 0 or 1 (1 when N is 3, 5 or 9, meaning the
  // lea needs the extra "+ x" term).
  int32_t Displacement() const {
    DCHECK(Matches());
    return displacement_;
  }

 private:
  Node* left_;  // NULL until a successful match.
  int power_;
  int displacement_;
};
} // namespace compiler } // namespace compiler
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
......
...@@ -1118,7 +1118,7 @@ static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged, ...@@ -1118,7 +1118,7 @@ static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
void SimplifiedLowering::DoLoadField(Node* node) { void SimplifiedLowering::DoLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op()); const FieldAccess& access = FieldAccessOf(node->op());
node->set_op(machine()->Load(access.machine_type)); node->set_op(machine()->Load(access.machine_type));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag()); Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset); node->InsertInput(graph()->zone(), 1, offset);
} }
...@@ -1129,7 +1129,7 @@ void SimplifiedLowering::DoStoreField(Node* node) { ...@@ -1129,7 +1129,7 @@ void SimplifiedLowering::DoStoreField(Node* node) {
access.base_is_tagged, access.machine_type, access.type); access.base_is_tagged, access.machine_type, access.type);
node->set_op( node->set_op(
machine()->Store(StoreRepresentation(access.machine_type, kind))); machine()->Store(StoreRepresentation(access.machine_type, kind)));
Node* offset = jsgraph()->Int32Constant(access.offset - access.tag()); Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset); node->InsertInput(graph()->zone(), 1, offset);
} }
...@@ -1137,20 +1137,22 @@ void SimplifiedLowering::DoStoreField(Node* node) { ...@@ -1137,20 +1137,22 @@ void SimplifiedLowering::DoStoreField(Node* node) {
Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access, Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
Node* const key) { Node* const key) {
Node* index = key; Node* index = key;
const int element_size = ElementSizeOf(access.machine_type); const int element_size_shift = ElementSizeLog2Of(access.machine_type);
if (element_size != 1) { if (element_size_shift) {
index = graph()->NewNode(machine()->Int32Mul(), index, index = graph()->NewNode(machine()->Word32Shl(), index,
jsgraph()->Int32Constant(element_size)); jsgraph()->Int32Constant(element_size_shift));
} }
const int fixed_offset = access.header_size - access.tag(); const int fixed_offset = access.header_size - access.tag();
if (fixed_offset != 0) { if (fixed_offset) {
index = graph()->NewNode(machine()->Int32Add(), index, index = graph()->NewNode(machine()->Int32Add(), index,
jsgraph()->Int32Constant(fixed_offset)); jsgraph()->Int32Constant(fixed_offset));
} }
// TODO(bmeurer): 64-Bit if (machine()->Is64()) {
// if (machine()->Is64()) { // TODO(turbofan): This is probably only correct for typed arrays, and only
// index = graph()->NewNode(machine()->ChangeInt32ToInt64(), index); // if the typed arrays are at most 2GiB in size, which happens to match
// } // exactly our current situation.
index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
}
return index; return index;
} }
......
...@@ -34,12 +34,7 @@ class X64OperandConverter : public InstructionOperandConverter { ...@@ -34,12 +34,7 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand OutputOperand() { return ToOperand(instr_->Output()); } Operand OutputOperand() { return ToOperand(instr_->Output()); }
Immediate ToImmediate(InstructionOperand* operand) { Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand); return Immediate(ToConstant(operand).ToInt32());
if (constant.type() == Constant::kInt32) {
return Immediate(constant.ToInt32());
}
UNREACHABLE();
return Immediate(-1);
} }
Operand ToOperand(InstructionOperand* op, int extra = 0) { Operand ToOperand(InstructionOperand* op, int extra = 0) {
......
...@@ -88,7 +88,7 @@ namespace compiler { ...@@ -88,7 +88,7 @@ namespace compiler {
// M = memory operand // M = memory operand
// R = base register // R = base register
// N = index register * N for N in {1, 2, 4, 8} // N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (int32_t) // I = immediate displacement (32-bit signed integer)
#define TARGET_ADDRESSING_MODE_LIST(V) \ #define TARGET_ADDRESSING_MODE_LIST(V) \
V(MR) /* [%r1 ] */ \ V(MR) /* [%r1 ] */ \
......
...@@ -1324,45 +1324,54 @@ TEST(InsertChangesAroundFloat64Cmp) { ...@@ -1324,45 +1324,54 @@ TEST(InsertChangesAroundFloat64Cmp) {
} }
namespace {
void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) { void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
Int32Matcher index = Int32Matcher(load_or_store->InputAt(1)); IntPtrMatcher mindex(load_or_store->InputAt(1));
CHECK(index.Is(access.offset - access.tag())); CHECK(mindex.Is(access.offset - access.tag()));
} }
Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) { Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
Int32BinopMatcher index(load_or_store->InputAt(1)); Node* index = load_or_store->InputAt(1);
CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode()); if (kPointerSize == 8) {
CHECK(index.right().Is(access.header_size - access.tag())); CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
index = index->InputAt(0);
int element_size = ElementSizeOf(access.machine_type); }
if (element_size != 1) { Int32BinopMatcher mindex(index);
Int32BinopMatcher mul(index.left().node()); CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
CHECK_EQ(IrOpcode::kInt32Mul, mul.node()->opcode()); CHECK(mindex.right().Is(access.header_size - access.tag()));
CHECK(mul.right().Is(element_size));
return mul.left().node(); const int element_size_shift = ElementSizeLog2Of(access.machine_type);
if (element_size_shift) {
Int32BinopMatcher shl(mindex.left().node());
CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
CHECK(shl.right().Is(element_size_shift));
return shl.left().node();
} else { } else {
return index.left().node(); return mindex.left().node();
} }
} }
static const MachineType machine_reps[] = { const MachineType kMachineReps[] = {kRepBit, kMachInt8, kMachInt16,
kRepBit, kMachInt8, kMachInt16, kMachInt32, kMachInt32, kMachInt64, kMachFloat64,
kMachInt64, kMachFloat64, kMachAnyTagged}; kMachAnyTagged};
} // namespace
TEST(LowerLoadField_to_load) { TEST(LowerLoadField_to_load) {
TestingGraph t(Type::Any(), Type::Signed32()); TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) { for (size_t i = 0; i < arraysize(kMachineReps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]}; Handle<Name>::null(), Type::Any(), kMachineReps[i]};
Node* load = Node* load =
t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start); t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
Node* use = t.Use(load, machine_reps[i]); Node* use = t.Use(load, kMachineReps[i]);
t.Return(use); t.Return(use);
t.Lower(); t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode()); CHECK_EQ(IrOpcode::kLoad, load->opcode());
...@@ -1370,7 +1379,7 @@ TEST(LowerLoadField_to_load) { ...@@ -1370,7 +1379,7 @@ TEST(LowerLoadField_to_load) {
CheckFieldAccessArithmetic(access, load); CheckFieldAccessArithmetic(access, load);
MachineType rep = OpParameter<MachineType>(load); MachineType rep = OpParameter<MachineType>(load);
CHECK_EQ(machine_reps[i], rep); CHECK_EQ(kMachineReps[i], rep);
} }
} }
...@@ -1378,12 +1387,12 @@ TEST(LowerLoadField_to_load) { ...@@ -1378,12 +1387,12 @@ TEST(LowerLoadField_to_load) {
TEST(LowerStoreField_to_store) { TEST(LowerStoreField_to_store) {
TestingGraph t(Type::Any(), Type::Signed32()); TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) { for (size_t i = 0; i < arraysize(kMachineReps); i++) {
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), machine_reps[i]}; Handle<Name>::null(), Type::Any(), kMachineReps[i]};
Node* val = t.ExampleWithOutput(machine_reps[i]); Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0, Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
val, t.start, t.start); val, t.start, t.start);
t.Effect(store); t.Effect(store);
...@@ -1393,10 +1402,10 @@ TEST(LowerStoreField_to_store) { ...@@ -1393,10 +1402,10 @@ TEST(LowerStoreField_to_store) {
CheckFieldAccessArithmetic(access, store); CheckFieldAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store); StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
if (machine_reps[i] & kRepTagged) { if (kMachineReps[i] & kRepTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind()); CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
} }
CHECK_EQ(machine_reps[i], rep.machine_type()); CHECK_EQ(kMachineReps[i], rep.machine_type());
} }
} }
...@@ -1404,15 +1413,15 @@ TEST(LowerStoreField_to_store) { ...@@ -1404,15 +1413,15 @@ TEST(LowerStoreField_to_store) {
TEST(LowerLoadElement_to_load) { TEST(LowerLoadElement_to_load) {
TestingGraph t(Type::Any(), Type::Signed32()); TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) { for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase, ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(), FixedArrayBase::kHeaderSize, Type::Any(),
machine_reps[i]}; kMachineReps[i]};
Node* load = Node* load =
t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1, t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
t.jsgraph.Int32Constant(1024), t.start, t.start); t.jsgraph.Int32Constant(1024), t.start, t.start);
Node* use = t.Use(load, machine_reps[i]); Node* use = t.Use(load, kMachineReps[i]);
t.Return(use); t.Return(use);
t.Lower(); t.Lower();
CHECK_EQ(IrOpcode::kLoad, load->opcode()); CHECK_EQ(IrOpcode::kLoad, load->opcode());
...@@ -1420,7 +1429,7 @@ TEST(LowerLoadElement_to_load) { ...@@ -1420,7 +1429,7 @@ TEST(LowerLoadElement_to_load) {
CheckElementAccessArithmetic(access, load); CheckElementAccessArithmetic(access, load);
MachineType rep = OpParameter<MachineType>(load); MachineType rep = OpParameter<MachineType>(load);
CHECK_EQ(machine_reps[i], rep); CHECK_EQ(kMachineReps[i], rep);
} }
} }
...@@ -1428,12 +1437,12 @@ TEST(LowerLoadElement_to_load) { ...@@ -1428,12 +1437,12 @@ TEST(LowerLoadElement_to_load) {
TEST(LowerStoreElement_to_store) { TEST(LowerStoreElement_to_store) {
TestingGraph t(Type::Any(), Type::Signed32()); TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(machine_reps); i++) { for (size_t i = 0; i < arraysize(kMachineReps); i++) {
ElementAccess access = {kNoBoundsCheck, kTaggedBase, ElementAccess access = {kNoBoundsCheck, kTaggedBase,
FixedArrayBase::kHeaderSize, Type::Any(), FixedArrayBase::kHeaderSize, Type::Any(),
machine_reps[i]}; kMachineReps[i]};
Node* val = t.ExampleWithOutput(machine_reps[i]); Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.jsgraph.Int32Constant(1024), val, t.p1, t.jsgraph.Int32Constant(1024), val,
t.start, t.start); t.start, t.start);
...@@ -1444,10 +1453,10 @@ TEST(LowerStoreElement_to_store) { ...@@ -1444,10 +1453,10 @@ TEST(LowerStoreElement_to_store) {
CheckElementAccessArithmetic(access, store); CheckElementAccessArithmetic(access, store);
StoreRepresentation rep = OpParameter<StoreRepresentation>(store); StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
if (machine_reps[i] & kRepTagged) { if (kMachineReps[i] & kRepTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind()); CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
} }
CHECK_EQ(machine_reps[i], rep.machine_type()); CHECK_EQ(kMachineReps[i], rep.machine_type());
} }
} }
......
...@@ -90,9 +90,12 @@ class ChangeLoweringTest : public GraphTest { ...@@ -90,9 +90,12 @@ class ChangeLoweringTest : public GraphTest {
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher, Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) { const Matcher<Node*>& control_matcher) {
return IsLoad(kMachFloat64, value_matcher, return IsLoad(kMachFloat64, value_matcher,
IsInt32Constant(HeapNumberValueOffset()), graph()->start(), IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
control_matcher); control_matcher);
} }
Matcher<Node*> IsIntPtrConstant(int value) {
return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
}
Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher, Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) { const Matcher<Node*>& rhs_matcher) {
return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher) return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
...@@ -162,7 +165,7 @@ TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) { ...@@ -162,7 +165,7 @@ TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
IsAllocateHeapNumber(IsValueEffect(val), graph()->start())), IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier), IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number), CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()), val, IsIntPtrConstant(HeapNumberValueOffset()), val,
CaptureEq(&heap_number), graph()->start()))); CaptureEq(&heap_number), graph()->start())));
} }
...@@ -207,7 +210,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) { ...@@ -207,7 +210,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
IsAllocateHeapNumber(_, CaptureEq(&if_true))), IsAllocateHeapNumber(_, CaptureEq(&if_true))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier), IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number), CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()), IsIntPtrConstant(HeapNumberValueOffset()),
IsChangeInt32ToFloat64(val), IsChangeInt32ToFloat64(val),
CaptureEq(&heap_number), CaptureEq(&if_true))), CaptureEq(&heap_number), CaptureEq(&if_true))),
IsProjection( IsProjection(
...@@ -346,7 +349,7 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) { ...@@ -346,7 +349,7 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
EXPECT_THAT(reduction.replacement(), EXPECT_THAT(reduction.replacement(),
IsWord64Shl(IsChangeInt32ToInt64(val), IsWord64Shl(IsChangeInt32ToInt64(val),
IsInt32Constant(SmiShiftAmount()))); IsInt64Constant(SmiShiftAmount())));
} }
...@@ -366,12 +369,12 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) { ...@@ -366,12 +369,12 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
IsPhi( IsPhi(
kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)), kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
IsChangeInt32ToFloat64(IsTruncateInt64ToInt32( IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))), IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
IsMerge( IsMerge(
AllOf(CaptureEq(&if_true), AllOf(CaptureEq(&if_true),
IsIfTrue(AllOf( IsIfTrue(AllOf(
CaptureEq(&branch), CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)), IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start())))), graph()->start())))),
IsIfFalse(CaptureEq(&branch))))); IsIfFalse(CaptureEq(&branch)))));
} }
...@@ -393,11 +396,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) { ...@@ -393,11 +396,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
IsPhi(kMachInt32, IsPhi(kMachInt32,
IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))), IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
IsTruncateInt64ToInt32( IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))), IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))), IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf( IsIfFalse(AllOf(
CaptureEq(&branch), CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)), IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start())))))); graph()->start()))))));
} }
...@@ -418,11 +421,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) { ...@@ -418,11 +421,11 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
IsPhi(kMachUint32, IsPhi(kMachUint32,
IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))), IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
IsTruncateInt64ToInt32( IsTruncateInt64ToInt32(
IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))), IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))), IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf( IsIfFalse(AllOf(
CaptureEq(&branch), CaptureEq(&branch),
IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)), IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
graph()->start())))))); graph()->start()))))));
} }
...@@ -442,18 +445,18 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) { ...@@ -442,18 +445,18 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
phi, phi,
IsPhi( IsPhi(
kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val), kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
IsInt32Constant(SmiShiftAmount())), IsInt64Constant(SmiShiftAmount())),
IsFinish(AllOf(CaptureEq(&heap_number), IsFinish(AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))), IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier), IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number), CaptureEq(&heap_number),
IsInt32Constant(HeapNumberValueOffset()), IsInt64Constant(HeapNumberValueOffset()),
IsChangeUint32ToFloat64(val), IsChangeUint32ToFloat64(val),
CaptureEq(&heap_number), CaptureEq(&if_false))), CaptureEq(&heap_number), CaptureEq(&if_false))),
IsMerge( IsMerge(
IsIfTrue(AllOf(CaptureEq(&branch), IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsUint32LessThanOrEqual( IsBranch(IsUint32LessThanOrEqual(
val, IsInt32Constant(SmiMaxValue())), val, IsInt64Constant(SmiMaxValue())),
graph()->start()))), graph()->start()))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch)))))); AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
} }
......
...@@ -138,6 +138,14 @@ bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand, ...@@ -138,6 +138,14 @@ bool InstructionSelectorTest::Stream::IsFixed(const InstructionOperand* operand,
} }
bool InstructionSelectorTest::Stream::IsSameAsFirst(
const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false;
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
return unallocated->HasSameAsInputPolicy();
}
bool InstructionSelectorTest::Stream::IsUsedAtStart( bool InstructionSelectorTest::Stream::IsUsedAtStart(
const InstructionOperand* operand) const { const InstructionOperand* operand) const {
if (!operand->IsUnallocated()) return false; if (!operand->IsUnallocated()) return false;
......
...@@ -172,6 +172,7 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone { ...@@ -172,6 +172,7 @@ class InstructionSelectorTest : public TestWithContext, public TestWithZone {
int ToVreg(const Node* node) const; int ToVreg(const Node* node) const;
bool IsFixed(const InstructionOperand* operand, Register reg) const; bool IsFixed(const InstructionOperand* operand, Register reg) const;
bool IsSameAsFirst(const InstructionOperand* operand) const;
bool IsUsedAtStart(const InstructionOperand* operand) const; bool IsUsedAtStart(const InstructionOperand* operand) const;
FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) { FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
......
...@@ -463,7 +463,6 @@ ...@@ -463,7 +463,6 @@
'../../src/compiler/node-aux-data.h', '../../src/compiler/node-aux-data.h',
'../../src/compiler/node-cache.cc', '../../src/compiler/node-cache.cc',
'../../src/compiler/node-cache.h', '../../src/compiler/node-cache.h',
'../../src/compiler/node-matchers.cc',
'../../src/compiler/node-matchers.h', '../../src/compiler/node-matchers.h',
'../../src/compiler/node-properties-inl.h', '../../src/compiler/node-properties-inl.h',
'../../src/compiler/node-properties.h', '../../src/compiler/node-properties.h',
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment