Commit 580fdf3c authored by ivica.bogosavljevic, committed by Commit bot

Implement UnalignedLoad and UnalignedStore turbofan operators.

Implement the optional UnalignedLoad and UnalignedStore
turbofan operators and use them in WasmCompiler for unaligned
memory accesses.

BUG=

Review-Url: https://codereview.chromium.org/2122853002
Cr-Commit-Position: refs/heads/master@{#37988}
parent 122a9b7a
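
In outline, the CL works like this: the WASM graph builder now emits the new operators whenever the static alignment check fails and the target's AlignmentRequirements do not guarantee that a plain Load/Store is safe; back-ends with native unaligned access never see the operators (their visitors are UNREACHABLE), while MIPS/MIPS64 lower them to the Ulw/Usw family of macro instructions. A condensed sketch of the selection logic, distilled from the LoadMem change further down in this diff (the `aligned` flag comes from surrounding code that is not shown here):

// Sketch distilled from this CL's wasm-compiler.cc changes; not verbatim.
Node* load;
if (aligned ||
    jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
  load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
                          MemBuffer(offset), index, *effect_, *control_);
} else {
  load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
                          MemBuffer(offset), index, *effect_, *control_);
}
*effect_ = load;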
@@ -494,6 +494,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
@@ -632,6 +632,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
@@ -51,7 +51,8 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements())),
flags_(flags),
name_(name),
code_generated_(false),
...
@@ -343,6 +343,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
...@@ -717,6 +717,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) { ...@@ -717,6 +717,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
int effect_level = 0; int effect_level = 0;
for (Node* const node : *block) { for (Node* const node : *block) {
if (node->opcode() == IrOpcode::kStore || if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall) {
++effect_level;
@@ -1227,6 +1228,14 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
case IrOpcode::kUnalignedLoad: {
UnalignedLoadRepresentation type =
UnalignedLoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitUnalignedLoad(node);
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
case IrOpcode::kCheckedLoad: {
MachineRepresentation rep =
CheckedLoadRepresentationOf(node->op()).representation();
...
@@ -132,16 +132,31 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
case IrOpcode::kLoad:
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep;
if (node->opcode() == IrOpcode::kLoad) {
rep = LoadRepresentationOf(node->op()).representation();
} else {
DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
rep = UnalignedLoadRepresentationOf(node->op()).representation();
}
if (rep == MachineRepresentation::kWord64) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
GetIndexNodes(index, index_low, index_high);
const Operator* load_op;
if (node->opcode() == IrOpcode::kLoad) {
load_op = machine()->Load(MachineType::Int32());
} else {
DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
load_op = machine()->UnalignedLoad(MachineType::Int32());
}
Node* high_node;
if (node->InputCount() > 2) {
Node* effect_high = node->InputAt(2);
@@ -162,15 +177,21 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep;
if (node->opcode() == IrOpcode::kStore) {
rep = StoreRepresentationOf(node->op()).representation();
} else {
DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
rep = UnalignedStoreRepresentationOf(node->op());
}
if (rep == MachineRepresentation::kWord64) {
// We change the original store node to store the low word, and create
// a new store node to store the high word. The effect and control edges
// are copied from the original store to the new store node, the effect
// edge of the original store is redirected to the new store.
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* index_low;
@@ -180,8 +201,16 @@ void Int64Lowering::LowerNode(Node* node) {
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
const Operator* store_op;
if (node->opcode() == IrOpcode::kStore) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
store_op = machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, write_barrier_kind));
} else {
DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
}
Node* high_node;
if (node->InputCount() > 3) {
...
@@ -583,6 +583,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64InsertHighWord32:
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
@@ -797,9 +798,13 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
if (nm.IsCheckedStore()) {
rep = CheckedStoreRepresentationOf(node->op());
value_input = 3;
} else if (nm.IsStore()) {
rep = StoreRepresentationOf(node->op()).representation();
value_input = 2;
} else {
DCHECK(nm.IsUnalignedStore());
rep = UnalignedStoreRepresentationOf(node->op());
value_input = 2;
}
Node* const value = node->InputAt(value_input);
...
@@ -46,6 +46,16 @@ StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
return OpParameter<StoreRepresentation>(op);
}
UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedLoad, op->opcode());
return OpParameter<UnalignedLoadRepresentation>(op);
}
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
return OpParameter<UnalignedStoreRepresentation>(op);
}
CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
@@ -468,6 +478,14 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<UnalignedLoadRepresentation> { \
UnalignedLoad##Type##Operator() \
: Operator1<UnalignedLoadRepresentation>( \
IrOpcode::kUnalignedLoad, \
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct CheckedLoad##Type##Operator final \
: public Operator1<CheckedLoadRepresentation> { \
CheckedLoad##Type##Operator() \
@@ -477,6 +495,7 @@ struct MachineOperatorGlobalCache {
"CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -524,6 +543,15 @@ struct MachineOperatorGlobalCache {
Store##Type##FullWriteBarrier##Operator() \
: Store##Type##Operator(kFullWriteBarrier) {} \
}; \
}; \ }; \
struct UnalignedStore##Type##Operator final \
: public Operator1<UnalignedStoreRepresentation> { \
UnalignedStore##Type##Operator() \
: Operator1<UnalignedStoreRepresentation>( \
IrOpcode::kUnalignedStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"UnalignedStore", 3, 1, 1, 0, 1, 0, \
MachineRepresentation::Type) {} \
}; \
struct CheckedStore##Type##Operator final \
: public Operator1<CheckedStoreRepresentation> { \
CheckedStore##Type##Operator() \
@@ -538,6 +566,7 @@ struct MachineOperatorGlobalCache {
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -597,6 +626,33 @@ MachineOperatorBuilder::MachineOperatorBuilder(
word == MachineRepresentation::kWord64);
}
const Operator* MachineOperatorBuilder::UnalignedLoad(
UnalignedLoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kUnalignedLoad##Type; \
}
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
return nullptr;
}
const Operator* MachineOperatorBuilder::UnalignedStore(
UnalignedStoreRepresentation rep) {
switch (rep) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
return &cache_.kUnalignedStore##kRep;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
return nullptr;
}
#define PURE(Name, properties, value_input_count, control_input_count, \
             output_count) \
...
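
Note the accessor pattern: like Load/CheckedLoad, the unaligned operators are cached singletons in MachineOperatorGlobalCache, one per machine type or representation. A hypothetical usage sketch (assumes a live Zone* zone; not code from this CL):

MachineOperatorBuilder m(zone);  // defaults for word size, flags, alignment
const Operator* a = m.UnalignedLoad(MachineType::Int32());
const Operator* b = m.UnalignedLoad(MachineType::Int32());
DCHECK_EQ(a, b);  // both point at the cached kUnalignedLoadInt32 instance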
@@ -70,6 +70,15 @@ std::ostream& operator<<(std::ostream&, StoreRepresentation);
StoreRepresentation const& StoreRepresentationOf(Operator const*);
typedef MachineType UnalignedLoadRepresentation;
UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const*);
// An UnalignedStore needs a MachineRepresentation.
typedef MachineRepresentation UnalignedStoreRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*);
// A CheckedLoad needs a MachineType.
typedef MachineType CheckedLoadRepresentation;
@@ -95,27 +104,34 @@ class MachineOperatorBuilder final : public ZoneObject {
// for operations that are unsupported by some back-ends.
enum Flag : unsigned {
kNoFlags = 0u,
// Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
// Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
kFloat32Max = 1u << 0,
kFloat32Min = 1u << 1,
kFloat64Max = 1u << 2,
kFloat64Min = 1u << 3,
kFloat32RoundDown = 1u << 4,
kFloat64RoundDown = 1u << 5,
kFloat32RoundUp = 1u << 6,
kFloat64RoundUp = 1u << 7,
kFloat32RoundTruncate = 1u << 8,
kFloat64RoundTruncate = 1u << 9,
kFloat32RoundTiesEven = 1u << 10,
kFloat64RoundTiesEven = 1u << 11,
kFloat64RoundTiesAway = 1u << 12,
kInt32DivIsSafe = 1u << 13,
kUint32DivIsSafe = 1u << 14,
kWord32ShiftIsSafe = 1u << 15,
kWord32Ctz = 1u << 16,
kWord64Ctz = 1u << 17,
kWord32Popcnt = 1u << 18,
kWord64Popcnt = 1u << 19,
kWord32ReverseBits = 1u << 20,
kWord64ReverseBits = 1u << 21,
kFloat32Neg = 1u << 22,
kFloat64Neg = 1u << 23,
kAllOptionalOps =
kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
@@ -191,7 +207,7 @@ class MachineOperatorBuilder final : public ZoneObject {
MachineRepresentation word = MachineType::PointerRepresentation(),
Flags supportedOperators = kNoFlags,
AlignmentRequirements alignmentRequirements =
AlignmentRequirements::FullUnalignedAccessSupport());
const Operator* Comment(const char* msg);
const Operator* DebugBreak();
@@ -591,6 +607,12 @@ class MachineOperatorBuilder final : public ZoneObject {
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
// unaligned load [base + index]
const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
const Operator* StackSlot(MachineRepresentation rep);
// Access to the machine stack.
...
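
Since the constructor default is now FullUnalignedAccessSupport(), call sites that want per-target behavior must pass AlignmentRequirements explicitly. A minimal sketch of the new construction pattern, mirroring the pipeline.cc change below (assumes a graph Zone* zone):

// Mirrors the pipeline.cc change in this CL; zone is assumed.
MachineOperatorBuilder* machine = new (zone) MachineOperatorBuilder(
    zone, MachineType::PointerRepresentation(),
    InstructionSelector::SupportedMachineOperatorFlags(),
    InstructionSelector::AlignmentRequirements());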
@@ -1360,34 +1360,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsSh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
case kMipsUsh:
__ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsSw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
case kMipsUsw:
__ Usw(i.InputRegister(2), i.MemoryOperand());
break;
case kMipsLwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
case kMipsUlwc1: {
__ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kMipsSwc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
case kMipsUswc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
break;
}
case kMipsLdc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMipsUldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kMipsSdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsUsdc1:
__ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
...
@@ -97,14 +97,23 @@ namespace compiler {
V(MipsLbu) \
V(MipsSb) \
V(MipsLh) \
V(MipsUlh) \
V(MipsLhu) \
V(MipsUlhu) \
V(MipsSh) \
V(MipsUsh) \
V(MipsLw) \
V(MipsUlw) \
V(MipsSw) \
V(MipsUsw) \
V(MipsLwc1) \
V(MipsUlwc1) \
V(MipsSwc1) \
V(MipsUswc1) \
V(MipsLdc1) \
V(MipsUldc1) \
V(MipsSdc1) \
V(MipsUsdc1) \
V(MipsFloat64ExtractLowWord32) \
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
...
@@ -952,6 +952,100 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitUnalignedLoad(Node* node) {
UnalignedLoadRepresentation load_rep =
UnalignedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
break;
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsUlw;
break;
case MachineRepresentation::kFloat32:
opcode = kMipsUlwc1;
break;
case MachineRepresentation::kFloat64:
opcode = kMipsUldc1;
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void InstructionSelector::VisitUnalignedStore(Node* node) {
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
// TODO(mips): I guess this could be done in a better way.
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMipsUswc1;
break;
case MachineRepresentation::kFloat64:
opcode = kMipsUsdc1;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
break;
case MachineRepresentation::kWord16:
opcode = kMipsUsh;
break;
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsUsw;
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
addr_reg, g.TempImmediate(0), g.UseRegister(value));
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -1531,6 +1625,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kInt32DivIsSafe |
...
@@ -1635,43 +1635,83 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Lhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Ush:
__ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lwu:
__ lwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ld:
__ ld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Usw:
__ Usw(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Sd:
__ sd(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Usd:
__ Usd(i.InputRegister(2), i.MemoryOperand());
break;
case kMips64Lwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
case kMips64Ulwc1: {
__ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kMips64Swc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
case kMips64Uswc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
break;
}
case kMips64Ldc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMips64Uldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kMips64Sdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Usdc1:
__ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
...
@@ -121,17 +121,29 @@ namespace compiler {
V(Mips64Lbu) \
V(Mips64Sb) \
V(Mips64Lh) \
V(Mips64Ulh) \
V(Mips64Lhu) \
V(Mips64Ulhu) \
V(Mips64Sh) \
V(Mips64Ush) \
V(Mips64Ld) \
V(Mips64Uld) \
V(Mips64Lw) \
V(Mips64Ulw) \
V(Mips64Lwu) \
V(Mips64Ulwu) \
V(Mips64Sw) \
V(Mips64Usw) \
V(Mips64Sd) \
V(Mips64Usd) \
V(Mips64Lwc1) \
V(Mips64Ulwc1) \
V(Mips64Swc1) \
V(Mips64Uswc1) \
V(Mips64Ldc1) \
V(Mips64Uldc1) \
V(Mips64Sdc1) \
V(Mips64Usdc1) \
V(Mips64BitcastDL) \
V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
...
@@ -1355,6 +1355,102 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitUnalignedLoad(Node* node) {
UnalignedLoadRepresentation load_rep =
UnalignedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kMips64Ulwc1;
break;
case MachineRepresentation::kFloat64:
opcode = kMips64Uldc1;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
break;
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Uld;
break;
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void InstructionSelector::VisitUnalignedStore(Node* node) {
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kMips64Uswc1;
break;
case MachineRepresentation::kFloat64:
opcode = kMips64Usdc1;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
break;
case MachineRepresentation::kWord16:
opcode = kMips64Ush;
break;
case MachineRepresentation::kWord32:
opcode = kMips64Usw;
break;
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Usd;
break;
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
addr_reg, g.TempImmediate(0), g.UseRegister(value));
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
@@ -2029,7 +2125,8 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord64Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt |
...
@@ -449,6 +449,8 @@
V(LoadParentFramePointer) \
V(CheckedLoad) \
V(CheckedStore) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
V(Int32PairSub) \
V(Int32PairMul) \
...
@@ -99,7 +99,8 @@ class PipelineData {
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
...
@@ -327,6 +327,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
@@ -13,14 +13,14 @@ namespace v8 {
namespace internal {
namespace compiler {
RawMachineAssembler::RawMachineAssembler(
    Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
    MachineRepresentation word, MachineOperatorBuilder::Flags flags,
    MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
    : isolate_(isolate),
      graph_(graph),
      schedule_(new (zone()) Schedule(zone())),
      machine_(zone(), word, flags, alignment_requirements),
      common_(zone()),
      call_descriptor_(call_descriptor),
      parameters_(parameter_count(), zone()),
...
@@ -40,7 +40,10 @@ class RawMachineAssembler {
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word = MachineType::PointerRepresentation(),
MachineOperatorBuilder::Flags flags =
    MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
    MachineOperatorBuilder::AlignmentRequirements::
        FullUnalignedAccessSupport());
~RawMachineAssembler() {}
Isolate* isolate() const { return isolate_; }
@@ -133,6 +136,34 @@ class RawMachineAssembler {
base, index, value);
}
// Unaligned memory operations
Node* UnalignedLoad(MachineType rep, Node* base) {
return UnalignedLoad(rep, base, IntPtrConstant(0));
}
Node* UnalignedLoad(MachineType rep, Node* base, Node* index) {
if (machine()->UnalignedLoadSupported(rep, 1)) {
return AddNode(machine()->Load(rep), base, index);
} else {
return AddNode(machine()->UnalignedLoad(rep), base, index);
}
}
Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* value) {
return UnalignedStore(rep, base, IntPtrConstant(0), value);
}
Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
MachineType t = MachineType::TypeForRepresentation(rep);
if (machine()->UnalignedStoreSupported(t, 1)) {
return AddNode(machine()->Store(StoreRepresentation(
rep, WriteBarrierKind::kNoWriteBarrier)),
base, index, value);
} else {
return AddNode(
machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
index, value);
}
}
// Atomic memory operations.
Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
return AddNode(machine()->AtomicLoad(rep), base, index);
@@ -647,6 +678,14 @@ class RawMachineAssembler {
Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
Node* UnalignedLoadFromPointer(void* address, MachineType rep,
int32_t offset = 0) {
return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
}
Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
Node* node) {
return UnalignedStore(rep, PointerConstant(address), node);
}
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
...
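
The *FromPointer/*ToPointer helpers make the new operators easy to exercise from cctest. A hypothetical test sketch (assumes the existing RawMachineAssemblerTester harness and CHECK_EQ from cctest; the misaligned buffer is illustrative only):

// Hypothetical cctest-style sketch; not a test from this CL.
RawMachineAssemblerTester<int32_t> m;
char buffer[8] = {0};
int32_t expected = 0x01020304;
memcpy(buffer + 1, &expected, sizeof(expected));  // deliberately misaligned
m.Return(m.UnalignedLoadFromPointer(buffer + 1, MachineType::Int32()));
CHECK_EQ(expected, m.Call());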
@@ -318,6 +318,12 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
S390OperandGenerator g(this);
...
@@ -2523,6 +2523,13 @@ Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
return Type::Internal();
}
Type* Typer::Visitor::TypeUnalignedLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeUnalignedStore(Node* node) {
UNREACHABLE();
return nullptr;
}
Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeCheckedStore(Node* node) {
...
...@@ -1184,6 +1184,8 @@ void Verifier::Visitor::Check(Node* node) { ...@@ -1184,6 +1184,8 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kLoadStackPointer: case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer: case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer: case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kCheckedLoad: case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore: case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad: case IrOpcode::kAtomicLoad:
......
@@ -2947,128 +2947,6 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
bool signExtend) {
switch (alignment) {
case 0:
return signExtend ? MachineType::Int8() : MachineType::Uint8();
case 1:
return signExtend ? MachineType::Int16() : MachineType::Uint16();
case 2:
return signExtend ? MachineType::Int32() : MachineType::Uint32();
default:
UNREACHABLE();
return MachineType::None();
}
}
Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
int numberOfBytes,
int stride, int current) {
int offset;
wasm::WasmOpcode addOpcode;
#if defined(V8_TARGET_LITTLE_ENDIAN)
offset = numberOfBytes - stride - current;
#elif defined(V8_TARGET_BIG_ENDIAN)
offset = current;
#else
#error Unsupported endianness
#endif
#if WASM_64
addOpcode = wasm::kExprI64Add;
#else
addOpcode = wasm::kExprI32Add;
#endif
if (offset == 0) {
return baseOffset;
} else {
return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
}
}
Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
MachineType memtype, Node* index,
uint32_t offset,
uint32_t alignment) {
Node* result;
Node* load;
bool extendTo64Bit = false;
wasm::WasmOpcode shiftOpcode;
wasm::WasmOpcode orOpcode;
Node* shiftConst;
bool signExtend = memtype.IsSigned();
bool isFloat = IsFloatingPoint(memtype.representation());
int stride =
1 << ElementSizeLog2Of(
GetTypeForUnalignedAccess(alignment, false).representation());
int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
DCHECK(numberOfBytes % stride == 0);
switch (type) {
case wasm::kAstI64:
case wasm::kAstF64:
shiftOpcode = wasm::kExprI64Shl;
orOpcode = wasm::kExprI64Ior;
result = jsgraph()->Int64Constant(0);
shiftConst = jsgraph()->Int64Constant(8 * stride);
extendTo64Bit = true;
break;
case wasm::kAstI32:
case wasm::kAstF32:
shiftOpcode = wasm::kExprI32Shl;
orOpcode = wasm::kExprI32Ior;
result = jsgraph()->Int32Constant(0);
shiftConst = jsgraph()->Int32Constant(8 * stride);
break;
default:
UNREACHABLE();
}
Node* baseOffset = MemBuffer(offset);
for (int i = 0; i < numberOfBytes; i += stride) {
result = Binop(shiftOpcode, result, shiftConst);
load = graph()->NewNode(
jsgraph()->machine()->Load(
GetTypeForUnalignedAccess(alignment, signExtend)),
GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
*effect_, *control_);
*effect_ = load;
if (extendTo64Bit) {
if (signExtend) {
load =
graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
} else {
load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
load);
}
}
signExtend = false;
result = Binop(orOpcode, result, load);
}
// Convert to float
if (isFloat) {
switch (type) {
case wasm::kAstF32:
result = Unop(wasm::kExprF32ReinterpretI32, result);
break;
case wasm::kAstF64:
result = Unop(wasm::kExprF64ReinterpretI64, result);
break;
default:
UNREACHABLE();
}
}
return result;
}
Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset,
@@ -3085,10 +2963,13 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
} else {
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
*effect_ = load;
#if defined(V8_TARGET_BIG_ENDIAN)
// TODO(john.yan) Implement byte swap turbofan operator
// and use it if available for better performance
@@ -3111,97 +2992,6 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
return load;
}
Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
int numberOfBytes,
int stride, int current) {
int offset;
wasm::WasmOpcode addOpcode;
#if defined(V8_TARGET_LITTLE_ENDIAN)
offset = current;
#elif defined(V8_TARGET_BIG_ENDIAN)
offset = numberOfBytes - stride - current;
#else
#error Unsupported endianness
#endif
#if WASM_64
addOpcode = wasm::kExprI64Add;
#else
addOpcode = wasm::kExprI32Add;
#endif
if (offset == 0) {
return baseOffset;
} else {
return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
}
}
Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
Node* val) {
Node* store;
Node* newValue;
wasm::WasmOpcode shiftOpcode;
Node* shiftConst;
bool extendTo64Bit = false;
bool isFloat = IsFloatingPoint(memtype.representation());
int stride = 1 << ElementSizeLog2Of(
GetTypeForUnalignedAccess(alignment).representation());
int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
DCHECK(numberOfBytes % stride == 0);
StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
kNoWriteBarrier);
if (ElementSizeLog2Of(memtype.representation()) <= 2) {
shiftOpcode = wasm::kExprI32ShrU;
shiftConst = jsgraph()->Int32Constant(8 * stride);
} else {
shiftOpcode = wasm::kExprI64ShrU;
shiftConst = jsgraph()->Int64Constant(8 * stride);
extendTo64Bit = true;
}
newValue = val;
if (isFloat) {
switch (memtype.representation()) {
case MachineRepresentation::kFloat64:
newValue = Unop(wasm::kExprI64ReinterpretF64, val);
break;
case MachineRepresentation::kFloat32:
newValue = Unop(wasm::kExprI32ReinterpretF32, val);
break;
default:
UNREACHABLE();
}
}
Node* baseOffset = MemBuffer(offset);
for (int i = 0; i < numberOfBytes - stride; i += stride) {
store = graph()->NewNode(
jsgraph()->machine()->Store(rep),
GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
index,
extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
*effect_, *control_);
newValue = Binop(shiftOpcode, newValue, shiftConst);
*effect_ = store;
}
store = graph()->NewNode(
jsgraph()->machine()->Store(rep),
GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
numberOfBytes - stride),
index,
extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
*effect_, *control_);
*effect_ = store;
return val;
}
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
@@ -3226,11 +3016,15 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
} else {
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
MemBuffer(offset), index, val, *effect_, *control_);
}
*effect_ = store;
return store;
}
@@ -3555,7 +3349,8 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
nullptr, new (graph_zone()) MachineOperatorBuilder(
graph_zone(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()))),
compilation_zone_(isolate->allocator()),
info_(function->name_length != 0
? module_env->module->GetNameOrNull(function->name_offset,
...
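
The deleted BuildUnalignedLoad/BuildUnalignedStore helpers emulated an unaligned access with several narrower aligned ones, accumulating the result shift-by-shift. For intuition, the removed load emulation for a 32-bit little-endian access at byte granularity (alignment 0, stride 1) computed roughly the following, restated here as plain C++ rather than graph nodes:

#include <cstdint>

// Illustrative restatement of the removed byte-wise load emulation;
// not the original graph-building code.
uint32_t EmulatedUnalignedLoadLE(const uint8_t* bytes) {
  uint32_t result = 0;
  for (int i = 3; i >= 0; --i) {        // most significant byte first
    result = (result << 8) | bytes[i];  // shift, then OR in the next byte
  }
  return result;
}

With the new operators, all of this collapses into a single UnalignedLoad node that each instruction selector lowers as it sees fit.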
@@ -234,19 +234,6 @@ class WasmGraphBuilder {
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
MachineType GetTypeForUnalignedAccess(uint32_t alignment,
bool signExtend = false);
Node* GetUnalignedLoadOffsetNode(Node* baseOffset, int numberOfBytes,
int stride, int current);
Node* BuildUnalignedLoad(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset, uint32_t alignment);
Node* GetUnalignedStoreOffsetNode(Node* baseOffset, int numberOfBytes,
int stride, int current);
Node* BuildUnalignedStore(MachineType memtype, Node* index, uint32_t offset,
uint32_t alignment, Node* val);
Node* BuildChangeEndianness(Node* node, MachineType type,
wasm::LocalType wasmtype = wasm::kAstStmt);
...
@@ -277,6 +277,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
@@ -304,6 +304,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
...
@@ -159,6 +159,35 @@ class MachineType {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
}
static MachineType TypeForRepresentation(MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
case MachineRepresentation::kNone:
return MachineType::None();
case MachineRepresentation::kBit:
return MachineType::Bool();
case MachineRepresentation::kWord8:
return isSigned ? MachineType::Int8() : MachineType::Uint8();
case MachineRepresentation::kWord16:
return isSigned ? MachineType::Int16() : MachineType::Uint16();
case MachineRepresentation::kWord32:
return isSigned ? MachineType::Int32() : MachineType::Uint32();
case MachineRepresentation::kWord64:
return isSigned ? MachineType::Int64() : MachineType::Uint64();
case MachineRepresentation::kFloat32:
return MachineType::Float32();
case MachineRepresentation::kFloat64:
return MachineType::Float64();
case MachineRepresentation::kSimd128:
return MachineType::Simd128();
case MachineRepresentation::kTagged:
return MachineType::AnyTagged();
default:
UNREACHABLE();
return MachineType::None();
}
}
private:
MachineRepresentation representation_;
MachineSemantic semantic_;
...
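
TypeForRepresentation picks the default MachineType for a representation (signed variants unless isSigned is false); RawMachineAssembler::UnalignedStore above uses it to ask UnalignedStoreSupported about a store representation. Behavior read directly off the switch (note the non-const reference parameter requires an lvalue):

// Read off the switch above; rep must be an lvalue.
MachineRepresentation rep = MachineRepresentation::kWord32;
DCHECK(MachineType::TypeForRepresentation(rep) == MachineType::Int32());
DCHECK(MachineType::TypeForRepresentation(rep, false) == MachineType::Uint32());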
@@ -38,7 +38,8 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
p1, p2, p3, p4),
true),
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()) {}
virtual ~RawMachineAssemblerTester() {}
...
@@ -25,7 +25,8 @@ class GraphAndBuilders {
: main_graph_(new (zone) Graph(zone)),
main_common_(zone),
main_machine_(zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()),
main_simplified_(zone) {}
Graph* graph() const { return main_graph_; }
...
This diff is collapsed.
This diff is collapsed.
@@ -310,12 +310,22 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsUnalignedLoad(
const Matcher<UnalignedLoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsUnalignedStore(
const Matcher<UnalignedStoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
...