Commit 174f0e95 authored by Manos Koukoutos, committed by Commit Bot

[wasm] Use object operators in wasm compiler, enable optimizations

This CL enables full csa optimization for wasm code. To take advantage
of csa load elimination, it switches from Load/Store to LoadFromObject/
StoreToObject operators in the wasm compiler (where possible).

Bug: v8:11510
Change-Id: Ibecd8ba81e89a76553b12ad2671ecad520e9e066
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2727407
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73268}
parent ee34ce48
......@@ -66,6 +66,12 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
namespace CsaLoadEliminationHelpers {
// Compatibility check used by CSA load elimination: decides whether a value
// stored with representation {r1} may be reused by a load of representation
// {r2}.
bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
  // TODO(manoskouk): Temporary patch-up to get wasm i8 and i16 working until
  // we properly fix the compatibility logic: treat all sub-word32
  // representations as incompatible with everything.
  const int word32_size = ElementSizeInBytes(MachineRepresentation::kWord32);
  if (ElementSizeInBytes(r1) < word32_size) return false;
  // Identical representations match; otherwise only tagged values are
  // interchangeable with other tagged values.
  return r1 == r2 || (IsAnyTagged(r1) && IsAnyTagged(r2));
}
......
......@@ -23,13 +23,14 @@ namespace compiler {
Int64Lowering::Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
CommonOperatorBuilder* common, Zone* zone,
Signature<MachineRepresentation>* signature,
CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified,
Zone* zone, Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case)
: zone_(zone),
graph_(graph),
machine_(machine),
common_(common),
simplified_(simplified),
state_(graph, 3),
stack_(zone),
replacements_(nullptr),
......@@ -161,6 +162,75 @@ void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
#endif
}
// Lowers a 64-bit load (used for kLoad, kUnalignedLoad and kLoadFromObject)
// into two 32-bit loads of the given {load_op}: the original node is reused
// for the low word and a new node is created for the high word. Loads of
// other representations fall through to DefaultLowering.
void Int64Lowering::LowerLoadOperator(Node* node, MachineRepresentation rep,
                                      const Operator* load_op) {
  if (rep == MachineRepresentation::kWord64) {
    LowerMemoryBaseAndIndex(node);
    Node* base = node->InputAt(0);
    Node* index = node->InputAt(1);
    Node* index_low;
    Node* index_high;
    // Index nodes addressing the two 32-bit halves (see GetIndexNodes;
    // presumably which half gets the +4 offset depends on target
    // endianness — the helper is compiled under an #if/#endif).
    GetIndexNodes(index, &index_low, &index_high);
    Node* high_node;
    if (node->InputCount() > 2) {
      // The load participates in the effect/control chains: create the
      // high-word load with the original effect/control inputs...
      Node* effect_high = node->InputAt(2);
      Node* control_high = node->InputAt(3);
      high_node = graph()->NewNode(load_op, base, index_high, effect_high,
                                   control_high);
      // change the effect change from old_node --> old_effect to
      // old_node --> high_node --> old_effect.
      node->ReplaceInput(2, high_node);
    } else {
      // Pure load: no effect/control inputs to rewire.
      high_node = graph()->NewNode(load_op, base, index_high);
    }
    // Turn the original node into the low-word load and register the
    // (low, high) replacement pair for uses of the 64-bit value.
    node->ReplaceInput(1, index_low);
    NodeProperties::ChangeOp(node, load_op);
    ReplaceNode(node, node, high_node);
  } else {
    DefaultLowering(node);
  }
}
// Lowers a 64-bit store (used for kStore, kUnalignedStore and kStoreToObject)
// into two 32-bit stores of the given {store_op}. Stores of other
// representations fall through to DefaultLowering.
void Int64Lowering::LowerStoreOperator(Node* node, MachineRepresentation rep,
                                       const Operator* store_op) {
  if (rep == MachineRepresentation::kWord64) {
    // We change the original store node to store the low word, and create
    // a new store node to store the high word. The effect and control edges
    // are copied from the original store to the new store node, the effect
    // edge of the original store is redirected to the new store.
    LowerMemoryBaseAndIndex(node);
    Node* base = node->InputAt(0);
    Node* index = node->InputAt(1);
    Node* index_low;
    Node* index_high;
    // Index nodes addressing the two 32-bit halves (see GetIndexNodes).
    GetIndexNodes(index, &index_low, &index_high);
    Node* value = node->InputAt(2);
    // The stored 64-bit value must already have been lowered to a
    // (low, high) replacement pair.
    DCHECK(HasReplacementLow(value));
    DCHECK(HasReplacementHigh(value));
    Node* high_node;
    if (node->InputCount() > 3) {
      // The store participates in the effect/control chains: insert the
      // high-word store between the original node and its old effect input.
      Node* effect_high = node->InputAt(3);
      Node* control_high = node->InputAt(4);
      high_node = graph()->NewNode(store_op, base, index_high,
                                   GetReplacementHigh(value), effect_high,
                                   control_high);
      node->ReplaceInput(3, high_node);
    } else {
      high_node = graph()->NewNode(store_op, base, index_high,
                                   GetReplacementHigh(value));
    }
    // Turn the original node into the low-word store and register the
    // (low, high) replacement pair.
    node->ReplaceInput(1, index_low);
    node->ReplaceInput(2, GetReplacementLow(value));
    NodeProperties::ChangeOp(node, store_op);
    ReplaceNode(node, node, high_node);
  } else {
    DefaultLowering(node, true);
  }
}
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
......@@ -172,104 +242,47 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
case IrOpcode::kLoad:
case IrOpcode::kLoad: {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
LowerLoadOperator(node, rep, machine()->Load(MachineType::Int32()));
break;
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
if (rep == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
GetIndexNodes(index, &index_low, &index_high);
const Operator* load_op;
if (node->opcode() == IrOpcode::kLoad) {
load_op = machine()->Load(MachineType::Int32());
} else {
DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
load_op = machine()->UnalignedLoad(MachineType::Int32());
}
Node* high_node;
if (node->InputCount() > 2) {
Node* effect_high = node->InputAt(2);
Node* control_high = node->InputAt(3);
high_node = graph()->NewNode(load_op, base, index_high, effect_high,
control_high);
// change the effect change from old_node --> old_effect to
// old_node --> high_node --> old_effect.
node->ReplaceInput(2, high_node);
} else {
high_node = graph()->NewNode(load_op, base, index_high);
}
node->ReplaceInput(1, index_low);
NodeProperties::ChangeOp(node, load_op);
ReplaceNode(node, node, high_node);
} else {
DefaultLowering(node);
}
LowerLoadOperator(node, rep,
machine()->UnalignedLoad(MachineType::Int32()));
break;
}
case IrOpcode::kLoadFromObject: {
ObjectAccess access = ObjectAccessOf(node->op());
LowerLoadOperator(node, access.machine_type.representation(),
simplified()->LoadFromObject(ObjectAccess(
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kStore: {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
LowerStoreOperator(
node, store_rep.representation(),
machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, store_rep.write_barrier_kind())));
break;
}
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep;
if (node->opcode() == IrOpcode::kStore) {
rep = StoreRepresentationOf(node->op()).representation();
} else {
DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
rep = UnalignedStoreRepresentationOf(node->op());
}
if (rep == MachineRepresentation::kWord64) {
// We change the original store node to store the low word, and create
// a new store node to store the high word. The effect and control edges
// are copied from the original store to the new store node, the effect
// edge of the original store is redirected to the new store.
LowerMemoryBaseAndIndex(node);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
GetIndexNodes(index, &index_low, &index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
const Operator* store_op;
if (node->opcode() == IrOpcode::kStore) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
store_op = machine()->Store(StoreRepresentation(
MachineRepresentation::kWord32, write_barrier_kind));
} else {
DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
}
Node* high_node;
if (node->InputCount() > 3) {
Node* effect_high = node->InputAt(3);
Node* control_high = node->InputAt(4);
high_node = graph()->NewNode(store_op, base, index_high,
GetReplacementHigh(value), effect_high,
control_high);
node->ReplaceInput(3, high_node);
} else {
high_node = graph()->NewNode(store_op, base, index_high,
GetReplacementHigh(value));
}
node->ReplaceInput(1, index_low);
node->ReplaceInput(2, GetReplacementLow(value));
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
DefaultLowering(node, true);
}
UnalignedStoreRepresentation store_rep =
UnalignedStoreRepresentationOf(node->op());
LowerStoreOperator(
node, store_rep,
machine()->UnalignedStore(MachineRepresentation::kWord32));
break;
}
case IrOpcode::kStoreToObject: {
ObjectAccess access = ObjectAccessOf(node->op());
LowerStoreOperator(node, access.machine_type.representation(),
simplified()->StoreToObject(ObjectAccess(
MachineType::Int32(), access.write_barrier_kind)));
break;
}
case IrOpcode::kStart: {
......
......@@ -12,6 +12,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/simplified-operator.h"
#include "src/zone/zone-containers.h"
namespace v8 {
......@@ -33,8 +34,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
public:
Int64Lowering(
Graph* graph, MachineOperatorBuilder* machine,
CommonOperatorBuilder* common, Zone* zone,
Signature<MachineRepresentation>* signature,
CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified_,
Zone* zone, Signature<MachineRepresentation>* signature,
std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);
void LowerGraph();
......@@ -54,6 +55,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Graph* graph() const { return graph_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
Signature<MachineRepresentation>* signature() const { return signature_; }
void PushNode(Node* node);
......@@ -63,6 +65,10 @@ class V8_EXPORT_PRIVATE Int64Lowering {
const Operator* unsigned_op);
void LowerWord64AtomicBinop(Node* node, const Operator* op);
void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
void LowerLoadOperator(Node* node, MachineRepresentation rep,
const Operator* load_op);
void LowerStoreOperator(Node* node, MachineRepresentation rep,
const Operator* store_op);
const CallDescriptor* LowerCallDescriptor(
const CallDescriptor* call_descriptor);
......@@ -86,6 +92,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
Graph* const graph_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
SimplifiedOperatorBuilder* simplified_;
NodeMarker<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
......
......@@ -80,9 +80,10 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
// {use} (stack check effect that we need to replace)
DCHECK_EQ(use->InputAt(1)->opcode(), IrOpcode::kCall);
DCHECK_EQ(use->InputAt(1)->InputAt(1), stack_check);
DCHECK_EQ(stack_check->InputAt(1)->opcode(), IrOpcode::kLoad);
DCHECK_EQ(stack_check->InputAt(1)->opcode(),
IrOpcode::kLoadFromObject);
DCHECK_EQ(stack_check->InputAt(1)->InputAt(2)->opcode(),
IrOpcode::kLoad);
IrOpcode::kLoadFromObject);
Node* replacing_effect =
stack_check->InputAt(1)->InputAt(2)->InputAt(2);
FOREACH_COPY_INDEX(i) {
......
......@@ -292,7 +292,13 @@ Reduction MemoryLowering::ReduceAllocateRaw(
// Lowers a LoadFromObject node to a machine-level load. If the accessed
// value is wider than kTaggedSize and the platform lacks hardware support
// for unaligned loads of that representation, an UnalignedLoad is emitted
// instead of a plain Load — presumably because object fields are only
// guaranteed tagged-size alignment (mirrors ReduceStoreToObject).
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
  DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
  ObjectAccess const& access = ObjectAccessOf(node->op());
  MachineRepresentation rep = access.machine_type.representation();
  // Use {rep} consistently instead of re-reading it from {access}; this
  // matches the parallel logic in ReduceStoreToObject.
  const Operator* load_op = ElementSizeInBytes(rep) > kTaggedSize &&
                                    !machine()->UnalignedLoadSupported(rep)
                                ? machine()->UnalignedLoad(access.machine_type)
                                : machine()->Load(access.machine_type);
  NodeProperties::ChangeOp(node, load_op);
  return Changed(node);
}
......@@ -387,9 +393,13 @@ Reduction MemoryLowering::ReduceStoreToObject(Node* node,
Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
access.machine_type.representation(), write_barrier_kind)));
MachineRepresentation rep = access.machine_type.representation();
StoreRepresentation store_rep(rep, write_barrier_kind);
const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
!machine()->UnalignedStoreSupported(rep)
? machine()->UnalignedStore(rep)
: machine()->Store(store_rep);
NodeProperties::ChangeOp(node, store_op);
return Changed(node);
}
......
......@@ -2564,6 +2564,8 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
json_of << "{\"function\":\"" << info_.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
}
pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
pipeline_.Run<MemoryOptimizationPhase>();
pipeline_.ComputeScheduledGraph();
if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
return CompilationJob::SUCCEEDED;
......@@ -3127,6 +3129,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
pipeline.ComputeScheduledGraph();
Linkage linkage(call_descriptor);
......@@ -3214,32 +3220,26 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify("V8.WasmLoopUnrolling", true);
}
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
pipeline.Run<CsaEarlyOptimizationPhase>(is_asm_js);
pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
}
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
data.BeginPhaseKind("V8.WasmOptimization");
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
data.info()->set_splitting();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
RuntimeCallCounterId::kOptimizeWasmFullOptimization);
GraphReducer graph_reducer(
scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
data.mcgraph()->Dead(), data.observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
const bool allow_signalling_nan = is_asm_js;
MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
allow_signalling_nan);
CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
data.broker(), data.common(),
data.machine(), scope.zone());
AddReducer(&data, &graph_reducer, &dead_code_elimination);
AddReducer(&data, &graph_reducer, &machine_reducer);
AddReducer(&data, &graph_reducer, &common_reducer);
AddReducer(&data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
pipeline.Run<CsaOptimizationPhase>(is_asm_js);
pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
pipeline.Run<DecompressionOptimizationPhase>();
pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
true);
} else {
pipeline.Run<WasmBaseOptimizationPhase>();
pipeline.RunPrintAndVerify(WasmBaseOptimizationPhase::phase_name(), true);
......
......@@ -60,8 +60,10 @@ int GetMaskForShift(Node* node) {
} // anonymous namespace
SimdScalarLowering::SimdScalarLowering(
MachineGraph* mcgraph, Signature<MachineRepresentation>* signature)
MachineGraph* mcgraph, SimplifiedOperatorBuilder* simplified,
Signature<MachineRepresentation>* signature)
: mcgraph_(mcgraph),
simplified_(simplified),
state_(mcgraph->graph(), 3),
stack_(mcgraph_->zone()),
replacements_(nullptr),
......@@ -541,12 +543,19 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
}
void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
MachineRepresentation rep = LoadRepresentationOf(node->op()).representation();
MachineRepresentation rep =
node->opcode() == IrOpcode::kLoadFromObject
? ObjectAccessOf(node->op()).machine_type.representation()
: LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
switch (node->opcode()) {
case IrOpcode::kLoad:
load_op = machine()->Load(MachineTypeFrom(type));
break;
case IrOpcode::kLoadFromObject:
load_op = simplified()->LoadFromObject(
ObjectAccess(MachineTypeFrom(type), kNoWriteBarrier));
break;
case IrOpcode::kUnalignedLoad:
load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
break;
......@@ -732,6 +741,14 @@ void SimdScalarLowering::LowerStoreOp(Node* node) {
MachineTypeFrom(rep_type).representation(), write_barrier_kind));
break;
}
case IrOpcode::kStoreToObject: {
rep = ObjectAccessOf(node->op()).machine_type.representation();
WriteBarrierKind write_barrier_kind =
ObjectAccessOf(node->op()).write_barrier_kind;
store_op = simplified()->StoreToObject(
ObjectAccess(MachineTypeFrom(rep_type), write_barrier_kind));
break;
}
case IrOpcode::kUnalignedStore: {
rep = UnalignedStoreRepresentationOf(node->op());
store_op =
......@@ -1452,6 +1469,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kLoad:
case IrOpcode::kLoadFromObject:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kProtectedLoad: {
LowerLoadOp(node, rep_type);
......@@ -1462,6 +1480,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kStore:
case IrOpcode::kStoreToObject:
case IrOpcode::kUnalignedStore:
case IrOpcode::kProtectedStore: {
LowerStoreOp(node);
......
......@@ -11,6 +11,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/simplified-operator.h"
#include "src/zone/zone-containers.h"
namespace v8 {
......@@ -24,6 +25,7 @@ namespace compiler {
class SimdScalarLowering {
public:
SimdScalarLowering(MachineGraph* mcgraph,
SimplifiedOperatorBuilder* simplified,
Signature<MachineRepresentation>* signature);
void LowerGraph();
......@@ -64,6 +66,7 @@ class SimdScalarLowering {
Graph* graph() const { return mcgraph_->graph(); }
MachineOperatorBuilder* machine() const { return mcgraph_->machine(); }
CommonOperatorBuilder* common() const { return mcgraph_->common(); }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
Signature<MachineRepresentation>* signature() const { return signature_; }
void LowerNode(Node* node);
......@@ -131,6 +134,7 @@ class SimdScalarLowering {
Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);
MachineGraph* const mcgraph_;
SimplifiedOperatorBuilder* const simplified_;
NodeMarker<State> state_;
ZoneDeque<NodeState> stack_;
Replacement* replacements_;
......
This diff is collapsed.
......@@ -726,9 +726,6 @@ class WasmGraphBuilder {
WasmInstanceCacheNodes* instance_cache_ = nullptr;
SetOncePointer<Node> instance_node_;
SetOncePointer<Node> ref_null_node_;
SetOncePointer<Node> globals_start_;
SetOncePointer<Node> imported_mutable_globals_;
SetOncePointer<Node> stack_check_code_node_;
SetOncePointer<Node> isolate_root_node_;
SetOncePointer<const Operator> stack_check_call_operator_;
......
......@@ -903,6 +903,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
......@@ -914,24 +915,21 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
......@@ -953,7 +951,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmFullOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
\
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
......
......@@ -484,8 +484,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
for (size_t i = 0; i < num_params + 1; i++) {
rep_builder.AddParam(MachineRepresentation::kWord32);
}
compiler::Int64Lowering r(graph(), machine(), common(), zone(),
rep_builder.Build());
compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
zone(), rep_builder.Build());
r.LowerGraph();
}
......
......@@ -32,13 +32,15 @@ class Int64LoweringTest : public GraphTest {
Int64LoweringTest()
: GraphTest(),
machine_(zone(), MachineRepresentation::kWord32,
MachineOperatorBuilder::Flag::kAllOptionalOps) {
MachineOperatorBuilder::Flag::kAllOptionalOps),
simplified_(zone()) {
value_[0] = 0x1234567890ABCDEF;
value_[1] = 0x1EDCBA098765432F;
value_[2] = 0x1133557799886644;
}
MachineOperatorBuilder* machine() { return &machine_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
......@@ -46,7 +48,8 @@ class Int64LoweringTest : public GraphTest {
graph()->start(), graph()->start());
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), zone(), signature);
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
signature);
lowering.LowerGraph();
}
......@@ -64,7 +67,7 @@ class Int64LoweringTest : public GraphTest {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(rep);
Int64Lowering lowering(graph(), machine(), common(), zone(),
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build(), std::move(special_case));
lowering.LowerGraph();
}
......@@ -134,6 +137,7 @@ class Int64LoweringTest : public GraphTest {
private:
MachineOperatorBuilder machine_;
SimplifiedOperatorBuilder simplified_;
int64_t value_[3];
};
......@@ -177,22 +181,29 @@ TEST_F(Int64LoweringTest, Int64Constant) {
start()));
#endif
#define INT64_LOAD_LOWERING(kLoad) \
int32_t base = 0x1234; \
int32_t index = 0x5678; \
\
LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \
Int32Constant(base), Int32Constant(index), \
start(), start()), \
MachineRepresentation::kWord64); \
\
Capture<Node*> high_word_load; \
#define INT64_LOAD_LOWERING(kLoad, param, builder) \
int32_t base = 0x1234; \
int32_t index = 0x5678; \
\
LowerGraph(graph()->NewNode(builder()->kLoad(param), Int32Constant(base), \
Int32Constant(index), start(), start()), \
MachineRepresentation::kWord64); \
\
Capture<Node*> high_word_load; \
LOAD_VERIFY(kLoad)
TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); }
TEST_F(Int64LoweringTest, Int64Load) {
INT64_LOAD_LOWERING(Load, MachineType::Int64(), machine);
}
TEST_F(Int64LoweringTest, UnalignedInt64Load) {
INT64_LOAD_LOWERING(UnalignedLoad);
INT64_LOAD_LOWERING(UnalignedLoad, MachineType::Int64(), machine);
}
TEST_F(Int64LoweringTest, Int64LoadFromObject) {
INT64_LOAD_LOWERING(LoadFromObject,
ObjectAccess(MachineType::Int64(), kNoWriteBarrier),
simplified);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
......@@ -225,7 +236,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
start()));
#endif
#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \
#define INT64_STORE_LOWERING(kStore, kRep32, kRep64, builder) \
int32_t base = 1111; \
int32_t index = 2222; \
int32_t return_value = 0x5555; \
......@@ -233,7 +244,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
sig_builder.AddReturn(MachineRepresentation::kWord32); \
\
Node* store = graph()->NewNode(machine()->kStore(kRep64), \
Node* store = graph()->NewNode(builder()->kStore(kRep64), \
Int32Constant(base), Int32Constant(index), \
Int64Constant(value(0)), start(), start()); \
\
......@@ -243,7 +254,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
\
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\
Int64Lowering lowering(graph(), machine(), common(), zone(), \
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
sig_builder.Build()); \
lowering.LowerGraph(); \
\
......@@ -254,7 +265,7 @@ TEST_F(Int64LoweringTest, Int64Store) {
WriteBarrierKind::kNoWriteBarrier);
const StoreRepresentation rep32(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier);
INT64_STORE_LOWERING(Store, rep32, rep64);
INT64_STORE_LOWERING(Store, rep32, rep64, machine);
}
TEST_F(Int64LoweringTest, Int32Store) {
......@@ -277,7 +288,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), zone(),
Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build());
lowering.LowerGraph();
......@@ -292,7 +303,13 @@ TEST_F(Int64LoweringTest, Int32Store) {
TEST_F(Int64LoweringTest, Int64UnalignedStore) {
const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
INT64_STORE_LOWERING(UnalignedStore, rep32, rep64);
INT64_STORE_LOWERING(UnalignedStore, rep32, rep64, machine);
}
TEST_F(Int64LoweringTest, Int64StoreToObject) {
const ObjectAccess access64(MachineType::Int64(), kNoWriteBarrier);
const ObjectAccess access32(MachineType::Int32(), kNoWriteBarrier);
INT64_STORE_LOWERING(StoreToObject, access32, access64, simplified);
}
TEST_F(Int64LoweringTest, Int64And) {
......
......@@ -1153,10 +1153,10 @@ LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad)
LOAD_MATCHER(LoadFromObject)
#define STORE_MATCHER(kStore) \
#define STORE_MATCHER(kStore, representation) \
class Is##kStore##Matcher final : public TestNodeMatcher { \
public: \
Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
Is##kStore##Matcher(const Matcher<representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \
const Matcher<Node*>& index_matcher, \
const Matcher<Node*>& value_matcher, \
......@@ -1198,9 +1198,8 @@ LOAD_MATCHER(LoadFromObject)
control_node = NodeProperties::GetControlInput(node); \
} \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \
PrintMatchAndExplain( \
OpParameter<kStore##Representation>(node->op()), "rep", \
rep_matcher_, listener) && \
PrintMatchAndExplain(OpParameter<representation>(node->op()), \
"rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
......@@ -1214,7 +1213,7 @@ LOAD_MATCHER(LoadFromObject)
} \
\
private: \
const Matcher<kStore##Representation> rep_matcher_; \
const Matcher<representation> rep_matcher_; \
const Matcher<Node*> base_matcher_; \
const Matcher<Node*> index_matcher_; \
const Matcher<Node*> value_matcher_; \
......@@ -1222,8 +1221,9 @@ LOAD_MATCHER(LoadFromObject)
const Matcher<Node*> control_matcher_; \
};
STORE_MATCHER(Store)
STORE_MATCHER(UnalignedStore)
STORE_MATCHER(Store, StoreRepresentation)
STORE_MATCHER(UnalignedStore, UnalignedStoreRepresentation)
STORE_MATCHER(StoreToObject, ObjectAccess)
class IsStackSlotMatcher final : public TestNodeMatcher {
public:
......@@ -2117,6 +2117,17 @@ Matcher<Node*> IsUnalignedStore(
control_matcher));
}
Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsStoreToObjectMatcher(
rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
control_matcher));
}
Matcher<Node*> IsStackSlot(
const Matcher<StackSlotRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
......
......@@ -354,6 +354,12 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment