Commit 174f0e95 authored by Manos Koukoutos, committed by Commit Bot

[wasm] Use object operators in wasm compiler, enable optimizations

This CL enables full csa optimization for wasm code. To take advantage
of csa load elimination, it switches from Load/Store to LoadFromObject/
StoreToObject operators in the wasm compiler (where possible).

Bug: v8:11510
Change-Id: Ibecd8ba81e89a76553b12ad2671ecad520e9e066
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2727407
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Commit-Queue: Manos Koukoutos <manoskouk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73268}
parent ee34ce48
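
Why the operator switch matters: CsaLoadElimination tracks its abstract state per (object, offset) pair, and only LoadFromObject/StoreToObject carry that pair explicitly through their ObjectAccess parameter; raw Load/Store nodes look like opaque memory traffic, so wasm graphs previously got no store-to-load forwarding. A minimal standalone sketch of that keyed state (plain C++; the types are illustrative stand-ins, not V8's data structures):

// Illustrative stand-ins for IR nodes; not V8 types.
#include <iostream>
#include <map>
#include <utility>

using ObjectId = int;  // the base-object node
using NodeId = int;    // the stored-value node

class FieldState {
 public:
  // StoreToObject(object, offset, value): remember the stored value.
  void RecordStore(ObjectId object, int offset, NodeId value) {
    fields_[{object, offset}] = value;
  }
  // LoadFromObject(object, offset): reuse a known value instead of loading.
  bool Lookup(ObjectId object, int offset, NodeId* out) const {
    auto it = fields_.find({object, offset});
    if (it == fields_.end()) return false;
    *out = it->second;
    return true;
  }

 private:
  std::map<std::pair<ObjectId, int>, NodeId> fields_;
};

int main() {
  FieldState state;
  state.RecordStore(/*object=*/1, /*offset=*/8, /*value=*/42);
  NodeId value;
  if (state.Lookup(1, 8, &value)) {
    std::cout << "load eliminated; reuse node " << value << "\n";
  }
}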
@@ -66,6 +66,12 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
 namespace CsaLoadEliminationHelpers {

 bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+  // TODO(manoskouk): Temporary patch-up to get wasm i8 and i16 working until
+  // we properly fix the compatibility logic.
+  if (ElementSizeInBytes(r1) <
+      ElementSizeInBytes(MachineRepresentation::kWord32)) {
+    return false;
+  }
   if (r1 == r2) return true;
   return IsAnyTagged(r1) && IsAnyTagged(r2);
 }
......
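
The early-out added above is deliberately blunt: any representation narrower than Word32 is declared incompatible with everything, including itself, so i8/i16 accesses are never forwarded. A standalone sketch of that behavior (the enum and byte sizes here are illustrative, not V8's definitions):

#include <cassert>

enum class MachineRepresentation { kWord8 = 1, kWord16 = 2, kWord32 = 4, kWord64 = 8 };

int ElementSizeInBytes(MachineRepresentation r) { return static_cast<int>(r); }

bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
  if (ElementSizeInBytes(r1) <
      ElementSizeInBytes(MachineRepresentation::kWord32)) {
    return false;  // wasm i8/i16: conservatively never compatible
  }
  return r1 == r2;  // the tagged-vs-tagged case is omitted in this sketch
}

int main() {
  assert(!IsCompatible(MachineRepresentation::kWord8,
                       MachineRepresentation::kWord8));
  assert(IsCompatible(MachineRepresentation::kWord32,
                      MachineRepresentation::kWord32));
}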
@@ -23,13 +23,14 @@ namespace compiler {
 Int64Lowering::Int64Lowering(
     Graph* graph, MachineOperatorBuilder* machine,
-    CommonOperatorBuilder* common, Zone* zone,
-    Signature<MachineRepresentation>* signature,
+    CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified,
+    Zone* zone, Signature<MachineRepresentation>* signature,
     std::unique_ptr<Int64LoweringSpecialCase> special_case)
     : zone_(zone),
       graph_(graph),
       machine_(machine),
       common_(common),
+      simplified_(simplified),
       state_(graph, 3),
       stack_(zone),
       replacements_(nullptr),
@@ -161,6 +162,75 @@ void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
 #endif
 }

+void Int64Lowering::LowerLoadOperator(Node* node, MachineRepresentation rep,
+                                      const Operator* load_op) {
+  if (rep == MachineRepresentation::kWord64) {
+    LowerMemoryBaseAndIndex(node);
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* index_low;
+    Node* index_high;
+    GetIndexNodes(index, &index_low, &index_high);
+    Node* high_node;
+    if (node->InputCount() > 2) {
+      Node* effect_high = node->InputAt(2);
+      Node* control_high = node->InputAt(3);
+      high_node = graph()->NewNode(load_op, base, index_high, effect_high,
+                                   control_high);
+      // Change the effect chain from old_node --> old_effect to
+      // old_node --> high_node --> old_effect.
+      node->ReplaceInput(2, high_node);
+    } else {
+      high_node = graph()->NewNode(load_op, base, index_high);
+    }
+    node->ReplaceInput(1, index_low);
+    NodeProperties::ChangeOp(node, load_op);
+    ReplaceNode(node, node, high_node);
+  } else {
+    DefaultLowering(node);
+  }
+}
+
+void Int64Lowering::LowerStoreOperator(Node* node, MachineRepresentation rep,
+                                       const Operator* store_op) {
+  if (rep == MachineRepresentation::kWord64) {
+    // We change the original store node to store the low word, and create
+    // a new store node to store the high word. The effect and control edges
+    // are copied from the original store to the new store node; the effect
+    // edge of the original store is redirected to the new store.
+    LowerMemoryBaseAndIndex(node);
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* index_low;
+    Node* index_high;
+    GetIndexNodes(index, &index_low, &index_high);
+    Node* value = node->InputAt(2);
+    DCHECK(HasReplacementLow(value));
+    DCHECK(HasReplacementHigh(value));
+    Node* high_node;
+    if (node->InputCount() > 3) {
+      Node* effect_high = node->InputAt(3);
+      Node* control_high = node->InputAt(4);
+      high_node = graph()->NewNode(store_op, base, index_high,
+                                   GetReplacementHigh(value), effect_high,
+                                   control_high);
+      node->ReplaceInput(3, high_node);
+    } else {
+      high_node = graph()->NewNode(store_op, base, index_high,
+                                   GetReplacementHigh(value));
+    }
+    node->ReplaceInput(1, index_low);
+    node->ReplaceInput(2, GetReplacementLow(value));
+    NodeProperties::ChangeOp(node, store_op);
+    ReplaceNode(node, node, high_node);
+  } else {
+    DefaultLowering(node, true);
+  }
+}
+
 void Int64Lowering::LowerNode(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kInt64Constant: {
@@ -172,104 +242,47 @@ void Int64Lowering::LowerNode(Node* node) {
       ReplaceNode(node, low_node, high_node);
       break;
     }
-    case IrOpcode::kLoad:
+    case IrOpcode::kLoad: {
+      MachineRepresentation rep =
+          LoadRepresentationOf(node->op()).representation();
+      LowerLoadOperator(node, rep, machine()->Load(MachineType::Int32()));
+      break;
+    }
     case IrOpcode::kUnalignedLoad: {
       MachineRepresentation rep =
           LoadRepresentationOf(node->op()).representation();
-
-      if (rep == MachineRepresentation::kWord64) {
-        LowerMemoryBaseAndIndex(node);
-        Node* base = node->InputAt(0);
-        Node* index = node->InputAt(1);
-        Node* index_low;
-        Node* index_high;
-        GetIndexNodes(index, &index_low, &index_high);
-        const Operator* load_op;
-
-        if (node->opcode() == IrOpcode::kLoad) {
-          load_op = machine()->Load(MachineType::Int32());
-        } else {
-          DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
-          load_op = machine()->UnalignedLoad(MachineType::Int32());
-        }
-
-        Node* high_node;
-        if (node->InputCount() > 2) {
-          Node* effect_high = node->InputAt(2);
-          Node* control_high = node->InputAt(3);
-          high_node = graph()->NewNode(load_op, base, index_high, effect_high,
-                                       control_high);
-          // change the effect change from old_node --> old_effect to
-          // old_node --> high_node --> old_effect.
-          node->ReplaceInput(2, high_node);
-        } else {
-          high_node = graph()->NewNode(load_op, base, index_high);
-        }
-        node->ReplaceInput(1, index_low);
-        NodeProperties::ChangeOp(node, load_op);
-        ReplaceNode(node, node, high_node);
-      } else {
-        DefaultLowering(node);
-      }
+      LowerLoadOperator(node, rep,
+                        machine()->UnalignedLoad(MachineType::Int32()));
       break;
     }
+    case IrOpcode::kLoadFromObject: {
+      ObjectAccess access = ObjectAccessOf(node->op());
+      LowerLoadOperator(node, access.machine_type.representation(),
+                        simplified()->LoadFromObject(ObjectAccess(
+                            MachineType::Int32(), access.write_barrier_kind)));
+      break;
+    }
+    case IrOpcode::kStore: {
+      StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+      LowerStoreOperator(
+          node, store_rep.representation(),
+          machine()->Store(StoreRepresentation(
+              MachineRepresentation::kWord32, store_rep.write_barrier_kind())));
+      break;
+    }
-    case IrOpcode::kStore:
     case IrOpcode::kUnalignedStore: {
-      MachineRepresentation rep;
-      if (node->opcode() == IrOpcode::kStore) {
-        rep = StoreRepresentationOf(node->op()).representation();
-      } else {
-        DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
-        rep = UnalignedStoreRepresentationOf(node->op());
-      }
-
-      if (rep == MachineRepresentation::kWord64) {
-        // We change the original store node to store the low word, and create
-        // a new store node to store the high word. The effect and control
-        // edges are copied from the original store to the new store node, the
-        // effect edge of the original store is redirected to the new store.
-        LowerMemoryBaseAndIndex(node);
-        Node* base = node->InputAt(0);
-        Node* index = node->InputAt(1);
-        Node* index_low;
-        Node* index_high;
-        GetIndexNodes(index, &index_low, &index_high);
-        Node* value = node->InputAt(2);
-        DCHECK(HasReplacementLow(value));
-        DCHECK(HasReplacementHigh(value));
-
-        const Operator* store_op;
-        if (node->opcode() == IrOpcode::kStore) {
-          WriteBarrierKind write_barrier_kind =
-              StoreRepresentationOf(node->op()).write_barrier_kind();
-          store_op = machine()->Store(StoreRepresentation(
-              MachineRepresentation::kWord32, write_barrier_kind));
-        } else {
-          DCHECK_EQ(IrOpcode::kUnalignedStore, node->opcode());
-          store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
-        }
-
-        Node* high_node;
-        if (node->InputCount() > 3) {
-          Node* effect_high = node->InputAt(3);
-          Node* control_high = node->InputAt(4);
-          high_node = graph()->NewNode(store_op, base, index_high,
-                                       GetReplacementHigh(value), effect_high,
-                                       control_high);
-          node->ReplaceInput(3, high_node);
-        } else {
-          high_node = graph()->NewNode(store_op, base, index_high,
-                                       GetReplacementHigh(value));
-        }
-        node->ReplaceInput(1, index_low);
-        node->ReplaceInput(2, GetReplacementLow(value));
-        NodeProperties::ChangeOp(node, store_op);
-        ReplaceNode(node, node, high_node);
-      } else {
-        DefaultLowering(node, true);
-      }
+      UnalignedStoreRepresentation store_rep =
+          UnalignedStoreRepresentationOf(node->op());
+      LowerStoreOperator(
+          node, store_rep,
+          machine()->UnalignedStore(MachineRepresentation::kWord32));
       break;
     }
+    case IrOpcode::kStoreToObject: {
+      ObjectAccess access = ObjectAccessOf(node->op());
+      LowerStoreOperator(node, access.machine_type.representation(),
+                         simplified()->StoreToObject(ObjectAccess(
+                             MachineType::Int32(), access.write_barrier_kind)));
+      break;
+    }
     case IrOpcode::kStart: {
......
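
For context, the Word64 path above keeps the original node as the low-word access and adds a second node for the high word. The same transformation on plain memory, as a standalone sketch (assumes a little-endian layout; in V8, GetIndexNodes computes the low/high offsets per platform endianness):

#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  uint64_t value = 0x1122334455667788ULL;
  unsigned char memory[8];
  std::memcpy(memory, &value, sizeof value);  // the "base" + "index" target

  uint32_t low, high;
  std::memcpy(&low, memory, 4);       // reused original node: index_low
  std::memcpy(&high, memory + 4, 4);  // new high_node: index_high
  std::cout << std::hex << high << " " << low << "\n";  // 11223344 55667788
}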
@@ -12,6 +12,7 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-marker.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/zone/zone-containers.h"

 namespace v8 {

@@ -33,8 +34,8 @@ class V8_EXPORT_PRIVATE Int64Lowering {
  public:
   Int64Lowering(
       Graph* graph, MachineOperatorBuilder* machine,
-      CommonOperatorBuilder* common, Zone* zone,
-      Signature<MachineRepresentation>* signature,
+      CommonOperatorBuilder* common, SimplifiedOperatorBuilder* simplified_,
+      Zone* zone, Signature<MachineRepresentation>* signature,
       std::unique_ptr<Int64LoweringSpecialCase> special_case = nullptr);

   void LowerGraph();

@@ -54,6 +55,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   Graph* graph() const { return graph_; }
   MachineOperatorBuilder* machine() const { return machine_; }
   CommonOperatorBuilder* common() const { return common_; }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
   Signature<MachineRepresentation>* signature() const { return signature_; }

   void PushNode(Node* node);

@@ -63,6 +65,10 @@ class V8_EXPORT_PRIVATE Int64Lowering {
                            const Operator* unsigned_op);
   void LowerWord64AtomicBinop(Node* node, const Operator* op);
   void LowerWord64AtomicNarrowOp(Node* node, const Operator* op);
+  void LowerLoadOperator(Node* node, MachineRepresentation rep,
+                         const Operator* load_op);
+  void LowerStoreOperator(Node* node, MachineRepresentation rep,
+                          const Operator* store_op);

   const CallDescriptor* LowerCallDescriptor(
       const CallDescriptor* call_descriptor);

@@ -86,6 +92,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   Graph* const graph_;
   MachineOperatorBuilder* machine_;
   CommonOperatorBuilder* common_;
+  SimplifiedOperatorBuilder* simplified_;
   NodeMarker<State> state_;
   ZoneDeque<NodeState> stack_;
   Replacement* replacements_;
......
@@ -80,9 +80,10 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
           // {use} (stack check effect that we need to replace)
           DCHECK_EQ(use->InputAt(1)->opcode(), IrOpcode::kCall);
           DCHECK_EQ(use->InputAt(1)->InputAt(1), stack_check);
-          DCHECK_EQ(stack_check->InputAt(1)->opcode(), IrOpcode::kLoad);
+          DCHECK_EQ(stack_check->InputAt(1)->opcode(),
+                    IrOpcode::kLoadFromObject);
           DCHECK_EQ(stack_check->InputAt(1)->InputAt(2)->opcode(),
-                    IrOpcode::kLoad);
+                    IrOpcode::kLoadFromObject);
           Node* replacing_effect =
               stack_check->InputAt(1)->InputAt(2)->InputAt(2);
           FOREACH_COPY_INDEX(i) {
......
@@ -292,7 +292,13 @@ Reduction MemoryLowering::ReduceAllocateRaw(
 Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
   DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
   ObjectAccess const& access = ObjectAccessOf(node->op());
-  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  MachineRepresentation rep = access.machine_type.representation();
+  const Operator* load_op = ElementSizeInBytes(rep) > kTaggedSize &&
+                                    !machine()->UnalignedLoadSupported(
+                                        access.machine_type.representation())
+                                ? machine()->UnalignedLoad(access.machine_type)
+                                : machine()->Load(access.machine_type);
+  NodeProperties::ChangeOp(node, load_op);
   return Changed(node);
 }

@@ -387,9 +393,13 @@ Reduction MemoryLowering::ReduceStoreToObject(Node* node,
   Node* value = node->InputAt(2);
   WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
       node, object, value, state, access.write_barrier_kind);
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(
-                access.machine_type.representation(), write_barrier_kind)));
+  MachineRepresentation rep = access.machine_type.representation();
+  StoreRepresentation store_rep(rep, write_barrier_kind);
+  const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
+                                     !machine()->UnalignedStoreSupported(rep)
+                                 ? machine()->UnalignedStore(rep)
+                                 : machine()->Store(store_rep);
+  NodeProperties::ChangeOp(node, store_op);
   return Changed(node);
 }
......
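
The ternaries above encode one rule: wasm struct/array fields are only guaranteed alignment of min(field size, kTaggedSize), so with pointer compression (kTaggedSize == 4) an 8-byte field may be misaligned, and targets whose plain loads/stores cannot tolerate that must get the Unaligned operators. A sketch of the decision (constants illustrative):

#include <iostream>

constexpr int kTaggedSize = 4;  // pointer-compression configuration

// UnalignedLoadSupported == true means the machine's ordinary load handles
// misaligned addresses, so the plain operator remains usable.
bool NeedsUnalignedOperator(int element_size_in_bytes,
                            bool unaligned_supported) {
  return element_size_in_bytes > kTaggedSize && !unaligned_supported;
}

int main() {
  std::cout << NeedsUnalignedOperator(8, false)   // 1: f64 field, strict CPU
            << NeedsUnalignedOperator(8, true)    // 0: f64 field, tolerant CPU
            << NeedsUnalignedOperator(4, false);  // 0: i32 field, aligned
}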
@@ -2564,6 +2564,8 @@ CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl(
     json_of << "{\"function\":\"" << info_.GetDebugName().get()
             << "\", \"source\":\"\",\n\"phases\":[";
   }
+  pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+  pipeline_.Run<MemoryOptimizationPhase>();
   pipeline_.ComputeScheduledGraph();
   if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
     return CompilationJob::SUCCEEDED;

@@ -3127,6 +3129,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
   }

   pipeline.RunPrintAndVerify("V8.WasmNativeStubMachineCode", true);
+
+  pipeline.Run<MemoryOptimizationPhase>();
+  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
   pipeline.ComputeScheduledGraph();

   Linkage linkage(call_descriptor);

@@ -3214,32 +3220,26 @@ void Pipeline::GenerateCodeForWasmFunction(
     pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
     pipeline.RunPrintAndVerify("V8.WasmLoopUnrolling", true);
   }
+  const bool is_asm_js = is_asmjs_module(module);
+
+  if (FLAG_wasm_opt || is_asm_js) {
+    pipeline.Run<CsaEarlyOptimizationPhase>(is_asm_js);
+    pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+  }
+
+  pipeline.Run<MemoryOptimizationPhase>();
+  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
   data.BeginPhaseKind("V8.WasmOptimization");
-  const bool is_asm_js = is_asmjs_module(module);
   if (FLAG_turbo_splitting && !is_asm_js) {
     data.info()->set_splitting();
   }
   if (FLAG_wasm_opt || is_asm_js) {
-    PipelineRunScope scope(&data, "V8.WasmFullOptimization",
-                           RuntimeCallCounterId::kOptimizeWasmFullOptimization);
-    GraphReducer graph_reducer(
-        scope.zone(), data.graph(), &data.info()->tick_counter(), data.broker(),
-        data.mcgraph()->Dead(), data.observe_node_manager());
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
-                                              data.common(), scope.zone());
-    ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
-    const bool allow_signalling_nan = is_asm_js;
-    MachineOperatorReducer machine_reducer(&graph_reducer, data.mcgraph(),
-                                           allow_signalling_nan);
-    CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
-                                         data.broker(), data.common(),
-                                         data.machine(), scope.zone());
-    AddReducer(&data, &graph_reducer, &dead_code_elimination);
-    AddReducer(&data, &graph_reducer, &machine_reducer);
-    AddReducer(&data, &graph_reducer, &common_reducer);
-    AddReducer(&data, &graph_reducer, &value_numbering);
-    graph_reducer.ReduceGraph();
+    pipeline.Run<CsaOptimizationPhase>(is_asm_js);
+    pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
+    pipeline.Run<DecompressionOptimizationPhase>();
+    pipeline.RunPrintAndVerify(DecompressionOptimizationPhase::phase_name(),
+                               true);
   } else {
     pipeline.Run<WasmBaseOptimizationPhase>();
     pipeline.RunPrintAndVerify(WasmBaseOptimizationPhase::phase_name(), true);
......
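
Put together, the wasm function pipeline above now runs the generic CSA phases instead of the hand-rolled GraphReducer block. A sketch of the resulting phase order (names as they appear in the diff; the list itself is illustrative, not the real pipeline driver):

#include <cstdio>

int main() {
  const char* wasm_phases[] = {
      "V8.WasmLoopUnrolling",            // when loop unrolling is enabled
      "CsaEarlyOptimizationPhase",       // only with --wasm-opt or asm.js
      "MemoryOptimizationPhase",         // lowers LoadFromObject/StoreToObject
      "CsaOptimizationPhase",            // replaces the inline reducer setup
      "DecompressionOptimizationPhase",  // only with --wasm-opt or asm.js
  };
  for (const char* phase : wasm_phases) std::puts(phase);
}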
@@ -60,8 +60,10 @@ int GetMaskForShift(Node* node) {
 }  // anonymous namespace

 SimdScalarLowering::SimdScalarLowering(
-    MachineGraph* mcgraph, Signature<MachineRepresentation>* signature)
+    MachineGraph* mcgraph, SimplifiedOperatorBuilder* simplified,
+    Signature<MachineRepresentation>* signature)
     : mcgraph_(mcgraph),
+      simplified_(simplified),
       state_(mcgraph->graph(), 3),
       stack_(mcgraph_->zone()),
       replacements_(nullptr),

@@ -541,12 +543,19 @@ void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
 }

 void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
-  MachineRepresentation rep =
-      LoadRepresentationOf(node->op()).representation();
+  MachineRepresentation rep =
+      node->opcode() == IrOpcode::kLoadFromObject
+          ? ObjectAccessOf(node->op()).machine_type.representation()
+          : LoadRepresentationOf(node->op()).representation();
   const Operator* load_op;
   switch (node->opcode()) {
     case IrOpcode::kLoad:
       load_op = machine()->Load(MachineTypeFrom(type));
       break;
+    case IrOpcode::kLoadFromObject:
+      load_op = simplified()->LoadFromObject(
+          ObjectAccess(MachineTypeFrom(type), kNoWriteBarrier));
+      break;
     case IrOpcode::kUnalignedLoad:
       load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
       break;

@@ -732,6 +741,14 @@ void SimdScalarLowering::LowerStoreOp(Node* node) {
           MachineTypeFrom(rep_type).representation(), write_barrier_kind));
       break;
     }
+    case IrOpcode::kStoreToObject: {
+      rep = ObjectAccessOf(node->op()).machine_type.representation();
+      WriteBarrierKind write_barrier_kind =
+          ObjectAccessOf(node->op()).write_barrier_kind;
+      store_op = simplified()->StoreToObject(
+          ObjectAccess(MachineTypeFrom(rep_type), write_barrier_kind));
+      break;
+    }
     case IrOpcode::kUnalignedStore: {
       rep = UnalignedStoreRepresentationOf(node->op());
       store_op =

@@ -1452,6 +1469,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
       break;
     }
     case IrOpcode::kLoad:
+    case IrOpcode::kLoadFromObject:
     case IrOpcode::kUnalignedLoad:
     case IrOpcode::kProtectedLoad: {
       LowerLoadOp(node, rep_type);

@@ -1462,6 +1480,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
       break;
     }
     case IrOpcode::kStore:
+    case IrOpcode::kStoreToObject:
     case IrOpcode::kUnalignedStore:
     case IrOpcode::kProtectedStore: {
       LowerStoreOp(node);
......
@@ -11,6 +11,7 @@
 #include "src/compiler/machine-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-marker.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/zone/zone-containers.h"

 namespace v8 {

@@ -24,6 +25,7 @@ namespace compiler {
 class SimdScalarLowering {
  public:
   SimdScalarLowering(MachineGraph* mcgraph,
+                     SimplifiedOperatorBuilder* simplified,
                      Signature<MachineRepresentation>* signature);

   void LowerGraph();

@@ -64,6 +66,7 @@ class SimdScalarLowering {
   Graph* graph() const { return mcgraph_->graph(); }
   MachineOperatorBuilder* machine() const { return mcgraph_->machine(); }
   CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
   Signature<MachineRepresentation>* signature() const { return signature_; }

   void LowerNode(Node* node);

@@ -131,6 +134,7 @@ class SimdScalarLowering {
   Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);

   MachineGraph* const mcgraph_;
+  SimplifiedOperatorBuilder* const simplified_;
   NodeMarker<State> state_;
   ZoneDeque<NodeState> stack_;
   Replacement* replacements_;
......
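
The wasm-compiler.cc changes below replace the gasm_->Load/Store macros with WasmGraphAssembler helpers that emit LoadFromObject/StoreToObject nodes threaded onto the assembler's current effect chain, which is what lets later phases order and eliminate them. A toy model of that threading (plain C++, not the real GraphAssembler API):

#include <iostream>
#include <string>
#include <vector>

struct ToyAssembler {
  std::vector<std::string> effect_chain;

  int AddNode(const std::string& op) {
    effect_chain.push_back(op);  // every access lands on the effect chain
    return static_cast<int>(effect_chain.size()) - 1;
  }
  int LoadFromObject(const std::string& field) {
    return AddNode("LoadFromObject " + field);
  }
  int StoreToObject(const std::string& field) {
    return AddNode("StoreToObject " + field);
  }
};

int main() {
  ToyAssembler gasm;
  gasm.StoreToObject("instance.foo");
  gasm.LoadFromObject("instance.foo");  // ordered after the store
  for (const auto& op : gasm.effect_chain) std::cout << op << "\n";
}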
@@ -79,52 +79,11 @@ MachineType assert_size(int expected_size, MachineType type) {
   (WasmInstanceObject::k##name##OffsetEnd - \
    WasmInstanceObject::k##name##Offset + 1)  // NOLINT(whitespace/indent)

-#define WASM_INSTANCE_OBJECT_OFFSET(name) \
-  wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-
-#define LOAD_INSTANCE_FIELD(name, type)                           \
-  gasm_->Load(assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
-              instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name))
-
-#define LOAD_FULL_POINTER(base_pointer, byte_offset) \
-  gasm_->Load(MachineType::Pointer(), base_pointer, byte_offset)
-
-#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
-  gasm_->Load(MachineType::TaggedPointer(), base_pointer, byte_offset)
-
-#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
-  gasm_->Load(MachineType::AnyTagged(), base_pointer, byte_offset)
-
-#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
-  gasm_->Load(type, array_node,                        \
-              wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index))
-
-#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
-
-#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
-
-#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
-  LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
-
-#define STORE_RAW(base, offset, val, rep, barrier) \
-  STORE_RAW_NODE_OFFSET(base, Int32Constant(offset), val, rep, barrier)
-
-#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
-  gasm_->Store(StoreRepresentation(rep, barrier), base, node_offset, val)
-
-// This can be used to store tagged Smi values only.
-#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value)            \
-  STORE_RAW(array_node,                                                 \
-            wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), \
-            value, MachineRepresentation::kTaggedSigned, kNoWriteBarrier)
-
-// This can be used to store any tagged (Smi and HeapObject) value.
-#define STORE_FIXED_ARRAY_SLOT_ANY(array_node, index, value)            \
-  STORE_RAW(array_node,                                                 \
-            wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), \
-            value, MachineRepresentation::kTagged, kFullWriteBarrier)
+#define LOAD_INSTANCE_FIELD(name, type)                   \
+  gasm_->LoadFromObject(                                  \
+      assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type), \
+      instance_node_.get(),                               \
+      wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset))

 bool ContainsSimd(const wasm::FunctionSig* sig) {
   for (auto type : sig->all()) {
@@ -173,6 +132,13 @@ CallDescriptor* GetBuiltinCallDescriptor(Builtins::Name name, Zone* zone,
       Operator::kNoProperties,            // properties
       stub_mode);                         // stub call mode
 }
+
+ObjectAccess ObjectAccessForGCStores(wasm::ValueType type) {
+  return ObjectAccess(
+      MachineType::TypeForRepresentation(type.machine_representation(),
+                                         !type.is_packed()),
+      type.is_reference() ? kFullWriteBarrier : kNoWriteBarrier);
+}
 }  // namespace

 JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
@@ -189,7 +155,7 @@ JSWasmCallData::JSWasmCallData(const wasm::FunctionSig* wasm_signature)
 class WasmGraphAssembler : public GraphAssembler {
  public:
   WasmGraphAssembler(MachineGraph* mcgraph, Zone* zone)
-      : GraphAssembler(mcgraph, zone) {}
+      : GraphAssembler(mcgraph, zone), simplified_(zone) {}

   template <typename... Args>
   Node* CallRuntimeStub(wasm::WasmCode::RuntimeStubId stub_id, Args*... args) {
@@ -259,6 +225,39 @@ class WasmGraphAssembler : public GraphAssembler {
   // Rule of thumb: if access to a given field in an object is required in
   // at least two places, put a helper function here.

+  Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
+    return AddNode(graph()->NewNode(
+        simplified_.LoadFromObject(ObjectAccess(type, kNoWriteBarrier)), base,
+        offset, effect(), control()));
+  }
+
+  Node* LoadFromObject(MachineType type, Node* base, int offset) {
+    return LoadFromObject(type, base, IntPtrConstant(offset));
+  }
+
+  Node* LoadFullPointer(Node* base, int offset) {
+    return LoadFromObject(MachineType::Pointer(), base, offset);
+  }
+
+  Node* LoadTaggedPointer(Node* base, int offset) {
+    return LoadFromObject(MachineType::TaggedPointer(), base, offset);
+  }
+
+  Node* LoadAnyTagged(Node* base, int offset) {
+    return LoadFromObject(MachineType::AnyTagged(), base, offset);
+  }
+
+  Node* StoreToObject(ObjectAccess access, Node* base, Node* offset,
+                      Node* value) {
+    return AddNode(graph()->NewNode(simplified_.StoreToObject(access), base,
+                                    offset, value, effect(), control()));
+  }
+
+  Node* StoreToObject(ObjectAccess access, Node* base, int offset,
+                      Node* value) {
+    return StoreToObject(access, base, IntPtrConstant(offset), value);
+  }
+
   Node* IsI31(Node* object) {
     if (COMPRESS_POINTERS_BOOL) {
       return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)),
@@ -272,124 +271,129 @@ class WasmGraphAssembler : public GraphAssembler {
   // Maps and their contents.

   Node* LoadMap(Node* heap_object) {
-    return Load(MachineType::TaggedPointer(), heap_object,
-                wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+    return LoadFromObject(MachineType::TaggedPointer(), heap_object,
+                          wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
   }

   Node* LoadInstanceType(Node* map) {
-    return Load(MachineType::Uint16(), map,
-                wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
+    return LoadFromObject(
+        MachineType::Uint16(), map,
+        wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset));
   }

   Node* LoadWasmTypeInfo(Node* map) {
     int offset = Map::kConstructorOrBackPointerOrNativeContextOffset;
-    return Load(MachineType::TaggedPointer(), map,
-                wasm::ObjectAccess::ToTagged(offset));
+    return LoadFromObject(MachineType::TaggedPointer(), map,
+                          wasm::ObjectAccess::ToTagged(offset));
   }

   Node* LoadSupertypes(Node* wasm_type_info) {
-    return Load(MachineType::TaggedPointer(), wasm_type_info,
-                wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
+    return LoadFromObject(
+        MachineType::TaggedPointer(), wasm_type_info,
+        wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset));
   }

   // FixedArrays.

   Node* LoadFixedArrayLengthAsSmi(Node* fixed_array) {
-    return Load(MachineType::TaggedSigned(), fixed_array,
-                wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
-  }
-
-  Node* LoadFixedArrayElement(Node* fixed_array, int index,
-                              MachineType type = MachineType::AnyTagged()) {
-    return Load(type, fixed_array,
-                wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index));
+    return LoadFromObject(
+        MachineType::TaggedSigned(), fixed_array,
+        wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset));
   }

   Node* LoadFixedArrayElement(Node* fixed_array, Node* index_intptr,
                               MachineType type = MachineType::AnyTagged()) {
     Node* offset = IntAdd(
         IntMul(index_intptr, IntPtrConstant(kTaggedSize)),
         IntPtrConstant(wasm::ObjectAccess::ToTagged(FixedArray::kHeaderSize)));
-    return Load(type, fixed_array, offset);
+    return LoadFromObject(type, fixed_array, offset);
+  }
+
+  Node* LoadFixedArrayElement(Node* array, int index, MachineType type) {
+    return LoadFromObject(
+        type, array,
+        wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index));
+  }
+
+  Node* LoadFixedArrayElementSmi(Node* array, int index) {
+    return LoadFixedArrayElement(array, index, MachineType::TaggedSigned());
+  }
+
+  Node* LoadFixedArrayElementPtr(Node* array, int index) {
+    return LoadFixedArrayElement(array, index, MachineType::TaggedPointer());
+  }
+
+  Node* LoadFixedArrayElementAny(Node* array, int index) {
+    return LoadFixedArrayElement(array, index, MachineType::AnyTagged());
+  }
+
+  Node* StoreFixedArrayElement(Node* array, int index, Node* value,
+                               ObjectAccess access) {
+    return StoreToObject(
+        access, array,
+        wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value);
+  }
+
+  Node* StoreFixedArrayElementSmi(Node* array, int index, Node* value) {
+    return StoreFixedArrayElement(
+        array, index, value,
+        ObjectAccess(MachineType::TaggedSigned(), kNoWriteBarrier));
+  }
+
+  Node* StoreFixedArrayElementAny(Node* array, int index, Node* value) {
+    return StoreFixedArrayElement(
+        array, index, value,
+        ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier));
   }

   // Functions, SharedFunctionInfos, FunctionData.

   Node* LoadSharedFunctionInfo(Node* js_function) {
-    return Load(
+    return LoadFromObject(
         MachineType::TaggedPointer(), js_function,
         wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction());
   }

   Node* LoadContextFromJSFunction(Node* js_function) {
-    return Load(MachineType::TaggedPointer(), js_function,
-                wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
+    return LoadFromObject(
+        MachineType::TaggedPointer(), js_function,
+        wasm::ObjectAccess::ContextOffsetInTaggedJSFunction());
   }

   Node* LoadFunctionDataFromJSFunction(Node* js_function) {
     Node* shared = LoadSharedFunctionInfo(js_function);
-    return Load(
+    return LoadFromObject(
         MachineType::TaggedPointer(), shared,
         wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset));
   }

   Node* LoadExportedFunctionIndexAsSmi(Node* exported_function_data) {
-    return Load(MachineType::TaggedSigned(), exported_function_data,
-                wasm::ObjectAccess::ToTagged(
-                    WasmExportedFunctionData::kFunctionIndexOffset));
+    return LoadFromObject(MachineType::TaggedSigned(), exported_function_data,
+                          wasm::ObjectAccess::ToTagged(
+                              WasmExportedFunctionData::kFunctionIndexOffset));
   }

   Node* LoadExportedFunctionInstance(Node* exported_function_data) {
-    return Load(MachineType::TaggedPointer(), exported_function_data,
-                wasm::ObjectAccess::ToTagged(
-                    WasmExportedFunctionData::kInstanceOffset));
+    return LoadFromObject(MachineType::TaggedPointer(), exported_function_data,
+                          wasm::ObjectAccess::ToTagged(
+                              WasmExportedFunctionData::kInstanceOffset));
   }

   // JavaScript objects.

   Node* LoadJSArrayElements(Node* js_array) {
-    return Load(MachineType::AnyTagged(), js_array,
-                wasm::ObjectAccess::ToTagged(JSObject::kElementsOffset));
+    return LoadFromObject(
+        MachineType::AnyTagged(), js_array,
+        wasm::ObjectAccess::ToTagged(JSObject::kElementsOffset));
   }

   // WasmGC objects.

-  MachineType FieldType(const wasm::StructType* type, uint32_t field_index,
-                        bool is_signed) {
-    return MachineType::TypeForRepresentation(
-        type->field(field_index).machine_representation(), is_signed);
-  }
-
   Node* FieldOffset(const wasm::StructType* type, uint32_t field_index) {
     return IntPtrConstant(wasm::ObjectAccess::ToTagged(
         WasmStruct::kHeaderSize + type->field_offset(field_index)));
   }

-  // It's guaranteed that struct/array fields are aligned to min(field_size,
-  // kTaggedSize), with the latter being 4 or 8 depending on platform and
-  // pointer compression. So on our most common configurations, 8-byte types
-  // must use unaligned loads/stores.
-  Node* LoadWithTaggedAlignment(MachineType type, Node* base, Node* offset) {
-    if (ElementSizeInBytes(type.representation()) > kTaggedSize) {
-      return LoadUnaligned(type, base, offset);
-    } else {
-      return Load(type, base, offset);
-    }
-  }
-
-  // Same alignment considerations as above.
-  Node* StoreWithTaggedAlignment(Node* base, Node* offset, Node* value,
-                                 wasm::ValueType type) {
-    MachineRepresentation rep = type.machine_representation();
-    if (ElementSizeInBytes(rep) > kTaggedSize) {
-      return StoreUnaligned(rep, base, offset, value);
-    } else {
-      WriteBarrierKind write_barrier =
-          type.is_reference() ? kPointerWriteBarrier : kNoWriteBarrier;
-      StoreRepresentation store_rep(rep, write_barrier);
-      return Store(store_rep, base, offset, value);
-    }
-  }
-
   Node* StoreStructField(Node* struct_object, const wasm::StructType* type,
                          uint32_t field_index, Node* value) {
-    return StoreWithTaggedAlignment(struct_object,
-                                    FieldOffset(type, field_index), value,
-                                    type->field(field_index));
+    return StoreToObject(ObjectAccessForGCStores(type->field(field_index)),
+                         struct_object, FieldOffset(type, field_index), value);
   }

   Node* WasmArrayElementOffset(Node* index, wasm::ValueType element_type) {
@@ -399,8 +403,9 @@ class WasmGraphAssembler : public GraphAssembler {
   }

   Node* LoadWasmArrayLength(Node* array) {
-    return Load(MachineType::Uint32(), array,
-                wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
+    return LoadFromObject(
+        MachineType::Uint32(), array,
+        wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset));
   }

   Node* IsDataRefMap(Node* map) {
@@ -433,6 +438,11 @@ class WasmGraphAssembler : public GraphAssembler {
     Node* instance_type = LoadInstanceType(map);
     return Word32Equal(instance_type, Int32Constant(type));
   }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
 };

 WasmGraphBuilder::WasmGraphBuilder(
@@ -560,20 +570,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects_and_control) {
 }

 Node* WasmGraphBuilder::RefNull() {
-  // Technically speaking, this does not generate a valid graph since the
-  // effect of the last Load is not consumed.
-  // TODO(manoskouk): Remove this code once we implement Load elimination
-  // optimization for wasm.
-  if (!ref_null_node_.is_set()) {
-    Node* current_effect = effect();
-    Node* current_control = control();
-    SetEffectControl(mcgraph()->graph()->start());
-    ref_null_node_.set(LOAD_FULL_POINTER(
-        BuildLoadIsolateRoot(),
-        IsolateData::root_slot_offset(RootIndex::kNullValue)));
-    SetEffectControl(current_effect, current_control);
-  }
-  return ref_null_node_.get();
+  return gasm_->LoadFullPointer(
+      BuildLoadIsolateRoot(),
+      IsolateData::root_slot_offset(RootIndex::kNullValue));
 }

 Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
@@ -616,7 +615,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
   Node* limit_address =
       LOAD_INSTANCE_FIELD(StackLimitAddress, MachineType::Pointer());
-  Node* limit = gasm_->Load(MachineType::Pointer(), limit_address, 0);
+  Node* limit = gasm_->LoadFromObject(MachineType::Pointer(), limit_address, 0);

   Node* check = SetEffect(graph()->NewNode(
       mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),

@@ -628,6 +627,12 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
   if (stack_check_call_operator_ == nullptr) {
     // Build and cache the stack check call operator and the constant
     // representing the stack check code.
+
+    // A direct call to a wasm runtime stub defined in this module.
+    // Just encode the stub index. This will be patched at relocation.
+    stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
+        wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
+
     auto call_descriptor = Linkage::GetStubCallDescriptor(
         mcgraph()->zone(),                    // zone
         NoContextDescriptor{},                // descriptor

@@ -635,10 +640,6 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
         CallDescriptor::kNoFlags,             // flags
         Operator::kNoProperties,              // properties
         StubCallMode::kCallWasmRuntimeStub);  // stub call mode
-    // A direct call to a wasm runtime stub defined in this module.
-    // Just encode the stub index. This will be patched at relocation.
-    stack_check_code_node_.set(mcgraph()->RelocatableIntPtrConstant(
-        wasm::WasmCode::kWasmStackGuard, RelocInfo::WASM_STUB_CALL));
     stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
   }
@@ -2145,7 +2146,7 @@ Node* WasmGraphBuilder::BuildCFuncInstruction(ExternalReference ref,

   Node* function = gasm_->ExternalConstant(ref);
   BuildCCall(&sig, function, stack_slot);
-  return gasm_->Load(type, stack_slot, 0);
+  return gasm_->LoadFromObject(type, stack_slot, 0);
 }

@@ -2187,7 +2188,7 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
   MachineSignature sig(0, 1, sig_types);
   Node* function = gasm_->ExternalConstant(ref);
   BuildCCall(&sig, function, stack_slot);
-  return gasm_->Load(result_type, stack_slot, 0);
+  return gasm_->LoadFromObject(result_type, stack_slot, 0);
 }

 namespace {
namespace { namespace {
...@@ -2232,7 +2233,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input, ...@@ -2232,7 +2233,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
Node* overflow = BuildCCall(&sig, function, stack_slot); Node* overflow = BuildCCall(&sig, function, stack_slot);
if (IsTrappingConvertOp(opcode)) { if (IsTrappingConvertOp(opcode)) {
ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position); ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
return gasm_->Load(int_ty, stack_slot, 0); return gasm_->LoadFromObject(int_ty, stack_slot, 0);
} }
Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position); Node* test = Binop(wasm::kExprI32Eq, overflow, Int32Constant(0), position);
Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse); Diamond tl_d(graph(), mcgraph()->common(), test, BranchHint::kFalse);
...@@ -2245,7 +2246,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input, ...@@ -2245,7 +2246,7 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
sat_d.Nest(nan_d, false); sat_d.Nest(nan_d, false);
Node* sat_val = Node* sat_val =
sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty)); sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
Node* load = gasm_->Load(int_ty, stack_slot, 0); Node* load = gasm_->LoadFromObject(int_ty, stack_slot, 0);
Node* nan_val = Node* nan_val =
nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val); nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
return tl_d.Phi(int_ty.representation(), nan_val, load); return tl_d.Phi(int_ty.representation(), nan_val, load);
@@ -2309,7 +2310,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
       case wasm::kOptRef:
       case wasm::kRtt:
       case wasm::kRttWithDepth:
-        STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
+        gasm_->StoreFixedArrayElementAny(values_array, index, value);
         ++index;
         break;
       case wasm::kI8:
@@ -2334,22 +2335,22 @@ void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
                                                       Node* value) {
   Node* upper_halfword_as_smi =
       BuildChangeUint31ToSmi(gasm_->Word32Shr(value, Int32Constant(16)));
-  STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, upper_halfword_as_smi);
+  gasm_->StoreFixedArrayElementSmi(values_array, *index, upper_halfword_as_smi);
   ++(*index);
   Node* lower_halfword_as_smi =
       BuildChangeUint31ToSmi(gasm_->Word32And(value, Int32Constant(0xFFFFu)));
-  STORE_FIXED_ARRAY_SLOT_SMI(values_array, *index, lower_halfword_as_smi);
+  gasm_->StoreFixedArrayElementSmi(values_array, *index, lower_halfword_as_smi);
   ++(*index);
 }

 Node* WasmGraphBuilder::BuildDecodeException32BitValue(Node* values_array,
                                                        uint32_t* index) {
-  Node* upper =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+  Node* upper = BuildChangeSmiToInt32(
+      gasm_->LoadFixedArrayElementSmi(values_array, *index));
   (*index)++;
   upper = gasm_->Word32Shl(upper, Int32Constant(16));
-  Node* lower =
-      BuildChangeSmiToInt32(LOAD_FIXED_ARRAY_SLOT_SMI(values_array, *index));
+  Node* lower = BuildChangeSmiToInt32(
+      gasm_->LoadFixedArrayElementSmi(values_array, *index));
   (*index)++;
   Node* value = gasm_->Word32Or(upper, lower);
   return value;
@@ -2381,14 +2382,15 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
 Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
   Node* exceptions_table =
       LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
-  Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
+  Node* tag =
+      gasm_->LoadFixedArrayElementPtr(exceptions_table, exception_index);
   return tag;
 }

 Node* WasmGraphBuilder::GetExceptionTag(Node* except_obj) {
   return gasm_->CallBuiltin(
       Builtins::kWasmGetOwnProperty, except_obj,
-      LOAD_FULL_POINTER(
+      gasm_->LoadFullPointer(
           BuildLoadIsolateRoot(),
           IsolateData::root_slot_offset(RootIndex::kwasm_exception_tag_symbol)),
       LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
@@ -2399,9 +2401,9 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
                                            Vector<Node*> values) {
   Node* values_array = gasm_->CallBuiltin(
       Builtins::kWasmGetOwnProperty, except_obj,
-      LOAD_FULL_POINTER(BuildLoadIsolateRoot(),
-                        IsolateData::root_slot_offset(
-                            RootIndex::kwasm_exception_values_symbol)),
+      gasm_->LoadFullPointer(BuildLoadIsolateRoot(),
+                             IsolateData::root_slot_offset(
+                                 RootIndex::kwasm_exception_values_symbol)),
       LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
   uint32_t index = 0;
   const wasm::WasmExceptionSig* sig = exception->sig;

@@ -2443,7 +2445,7 @@ Node* WasmGraphBuilder::GetExceptionValues(Node* except_obj,
       case wasm::kOptRef:
       case wasm::kRtt:
       case wasm::kRttWithDepth:
-        value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
+        value = gasm_->LoadFixedArrayElementAny(values_array, index);
         ++index;
         break;
       case wasm::kI8:
@@ -2745,7 +2747,7 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,

   ZeroCheck32(trap_zero, call, position);
   TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
-  return gasm_->Load(result_type, stack_slot, 0);
+  return gasm_->LoadFromObject(result_type, stack_slot, 0);
 }

 template <typename... Args>
@@ -2854,13 +2856,14 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
   Node* imported_function_refs =
       LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
   Node* ref_node =
-      LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
+      gasm_->LoadFixedArrayElementPtr(imported_function_refs, func_index);

   // Load the target from the imported_targets array at a known offset.
   Node* imported_targets =
       LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
-  Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
-                                  func_index * kSystemPointerSize);
+  Node* target_node =
+      gasm_->LoadFromObject(MachineType::Pointer(), imported_targets,
+                            func_index * kSystemPointerSize);
   args[0] = target_node;
   const UseRetpoline use_retpoline =
       untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;

@@ -2893,8 +2896,8 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
       func_index_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
   Node* imported_targets =
       LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
-  Node* target_node = gasm_->Load(MachineType::Pointer(), imported_targets,
-                                  func_index_times_pointersize);
+  Node* target_node = gasm_->LoadFromObject(
+      MachineType::Pointer(), imported_targets, func_index_times_pointersize);
   args[0] = target_node;
   const UseRetpoline use_retpoline =
       untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -2953,21 +2956,21 @@ void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
   Node* ift_tables =
       LOAD_INSTANCE_FIELD(IndirectFunctionTables, MachineType::TaggedPointer());
-  Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
+  Node* ift_table = gasm_->LoadFixedArrayElementAny(ift_tables, table_index);

-  *ift_size = gasm_->Load(
+  *ift_size = gasm_->LoadFromObject(
       MachineType::Int32(), ift_table,
       wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset));

-  *ift_sig_ids = gasm_->Load(
+  *ift_sig_ids = gasm_->LoadFromObject(
       MachineType::Pointer(), ift_table,
       wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset));

-  *ift_targets = gasm_->Load(
+  *ift_targets = gasm_->LoadFromObject(
       MachineType::Pointer(), ift_table,
       wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset));

-  *ift_instances = gasm_->Load(
+  *ift_instances = gasm_->LoadFromObject(
       MachineType::TaggedPointer(), ift_table,
       wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset));
 }
@@ -3021,8 +3024,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,

   Node* int32_scaled_key =
       Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
-  Node* loaded_sig =
-      gasm_->Load(MachineType::Int32(), ift_sig_ids, int32_scaled_key);
+  Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
+                                           int32_scaled_key);

   if (table_type.is_reference_to(wasm::HeapType::kFunc)) {
     int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];

@@ -3046,8 +3049,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
   Node* intptr_scaled_key =
       gasm_->IntMul(key_intptr, gasm_->IntPtrConstant(kSystemPointerSize));
-  Node* target =
-      gasm_->Load(MachineType::Pointer(), ift_targets, intptr_scaled_key);
+  Node* target = gasm_->LoadFromObject(MachineType::Pointer(), ift_targets,
+                                       intptr_scaled_key);

   args[0] = target;
   const UseRetpoline use_retpoline =
Node* WasmGraphBuilder::BuildLoadJumpTableOffsetFromExportedFunctionData( Node* WasmGraphBuilder::BuildLoadJumpTableOffsetFromExportedFunctionData(
Node* function_data) { Node* function_data) {
Node* jump_table_offset_smi = Node* jump_table_offset_smi = gasm_->LoadFromObject(
gasm_->Load(MachineType::TaggedSigned(), function_data, MachineType::TaggedSigned(), function_data,
wasm::ObjectAccess::ToTagged( wasm::ObjectAccess::ToTagged(
WasmExportedFunctionData::kJumpTableOffsetOffset)); WasmExportedFunctionData::kJumpTableOffsetOffset));
return BuildChangeSmiToIntPtr(jump_table_offset_smi); return BuildChangeSmiToIntPtr(jump_table_offset_smi);
} }
...@@ -3104,11 +3107,11 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args, ...@@ -3104,11 +3107,11 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
auto imported_label = gasm_->MakeLabel(); auto imported_label = gasm_->MakeLabel();
// Check if callee is a locally defined or imported function it its module. // Check if callee is a locally defined or imported function in its module.
Node* imported_function_refs = Node* imported_function_refs = gasm_->LoadFromObject(
gasm_->Load(MachineType::TaggedPointer(), callee_instance, MachineType::TaggedPointer(), callee_instance,
wasm::ObjectAccess::ToTagged( wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kImportedFunctionRefsOffset)); WasmInstanceObject::kImportedFunctionRefsOffset));
Node* imported_functions_num = Node* imported_functions_num =
gasm_->LoadFixedArrayLengthAsSmi(imported_function_refs); gasm_->LoadFixedArrayLengthAsSmi(imported_function_refs);
gasm_->GotoIf(gasm_->SmiLessThan(function_index, imported_functions_num), gasm_->GotoIf(gasm_->SmiLessThan(function_index, imported_functions_num),
...@@ -3116,9 +3119,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args, ...@@ -3116,9 +3119,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
{ {
// Function locally defined in module. // Function locally defined in module.
Node* jump_table_start = Node* jump_table_start =
gasm_->Load(MachineType::Pointer(), callee_instance, gasm_->LoadFromObject(MachineType::Pointer(), callee_instance,
wasm::ObjectAccess::ToTagged( wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kJumpTableStartOffset)); WasmInstanceObject::kJumpTableStartOffset));
Node* jump_table_offset = Node* jump_table_offset =
BuildLoadJumpTableOffsetFromExportedFunctionData(function_data); BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
Node* jump_table_slot = Node* jump_table_slot =
...@@ -3137,15 +3140,15 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args, ...@@ -3137,15 +3140,15 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
imported_function_refs, function_index_intptr, imported_function_refs, function_index_intptr,
MachineType::TaggedPointer()); MachineType::TaggedPointer());
Node* imported_function_targets = Node* imported_function_targets = gasm_->LoadFromObject(
gasm_->Load(MachineType::Pointer(), callee_instance, MachineType::Pointer(), callee_instance,
wasm::ObjectAccess::ToTagged( wasm::ObjectAccess::ToTagged(
WasmInstanceObject::kImportedFunctionTargetsOffset)); WasmInstanceObject::kImportedFunctionTargetsOffset));
Node* target_node = Node* target_node = gasm_->LoadFromObject(
gasm_->Load(MachineType::Pointer(), imported_function_targets, MachineType::Pointer(), imported_function_targets,
gasm_->IntMul(function_index_intptr, gasm_->IntMul(function_index_intptr,
gasm_->IntPtrConstant(kSystemPointerSize))); gasm_->IntPtrConstant(kSystemPointerSize)));
gasm_->Goto(&end_label, target_node, imported_instance); gasm_->Goto(&end_label, target_node, imported_instance);
} }
...@@ -3158,15 +3161,15 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args, ...@@ -3158,15 +3161,15 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
// (current WasmInstanceObject, function_data->callable()). // (current WasmInstanceObject, function_data->callable()).
gasm_->Bind(&js_label); gasm_->Bind(&js_label);
Node* wrapper_code = Node* wrapper_code = gasm_->LoadFromObject(
gasm_->Load(MachineType::TaggedPointer(), function_data, MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged( wasm::ObjectAccess::ToTagged(
WasmJSFunctionData::kWasmToJsWrapperCodeOffset)); WasmJSFunctionData::kWasmToJsWrapperCodeOffset));
Node* call_target = gasm_->IntAdd( Node* call_target = gasm_->IntAdd(
wrapper_code, wrapper_code,
gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize))); gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
Node* callable = gasm_->Load( Node* callable = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function_data, MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset)); wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
// TODO(manoskouk): Find an elegant way to avoid allocating this pair for // TODO(manoskouk): Find an elegant way to avoid allocating this pair for
...@@ -3464,12 +3467,7 @@ void WasmGraphBuilder::SetEffectControl(Node* effect, Node* control) { ...@@ -3464,12 +3467,7 @@ void WasmGraphBuilder::SetEffectControl(Node* effect, Node* control) {
} }
Node* WasmGraphBuilder::GetImportedMutableGlobals() { Node* WasmGraphBuilder::GetImportedMutableGlobals() {
if (imported_mutable_globals_ == nullptr) { return LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr());
// Load imported_mutable_globals_ from the instance object at runtime.
imported_mutable_globals_ =
LOAD_INSTANCE_FIELD(ImportedMutableGlobals, MachineType::UintPtr());
}
return imported_mutable_globals_.get();
} }
void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type, void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
...@@ -3478,28 +3476,16 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type, ...@@ -3478,28 +3476,16 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
Node** offset_node) { Node** offset_node) {
DCHECK_NOT_NULL(instance_node_); DCHECK_NOT_NULL(instance_node_);
if (global.mutability && global.imported) { if (global.mutability && global.imported) {
*base_node = *base_node = gasm_->LoadFromObject(
gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(), MachineType::UintPtr(), GetImportedMutableGlobals(),
Int32Constant(global.index * sizeof(Address))); Int32Constant(global.index * sizeof(Address)));
*offset_node = Int32Constant(0); *offset_node = Int32Constant(0);
} else { } else {
if (globals_start_ == nullptr) { Node* globals_start =
// Load globals_start from the instance object at runtime. gasm_->LoadFromObject(MachineType::UintPtr(), instance_node_.get(),
// TODO(wasm): we currently generate only one load of the {globals_start} Int32Constant(wasm::ObjectAccess::ToTagged(
// start per graph, which means it can be placed anywhere by the WasmInstanceObject::kGlobalsStartOffset)));
// scheduler. This is legal because the globals_start should never change. *base_node = globals_start;
// However, in some cases (e.g. if the instance object is already in a
// register), it is slightly more efficient to reload this value from the
// instance object. Since this depends on register allocation, it is not
// possible to express in the graph, and would essentially constitute a
// "mem2reg" optimization in TurboFan.
globals_start_ = graph()->NewNode(
mcgraph()->machine()->Load(MachineType::UintPtr()),
instance_node_.get(),
Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
graph()->start(), graph()->start());
}
*base_node = globals_start_.get();
*offset_node = Int32Constant(global.offset); *offset_node = Int32Constant(global.offset);
if (mem_type == MachineType::Simd128() && global.offset != 0) { if (mem_type == MachineType::Simd128() && global.offset != 0) {
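The removed caching above relied on a raw machine Load anchored directly to the graph's start node; the replacement re-emits an effect-chained LoadFromObject at each use and leaves deduplication to the optimizer. A sketch of the contrast, assuming {instance} and {offset} name the same values as in the surrounding code:

// Removed pattern: one load, pinned to graph start, outside the normal
// effect chain.
Node* cached = graph()->NewNode(
    mcgraph()->machine()->Load(MachineType::UintPtr()), instance, offset,
    graph()->start(), graph()->start());
// New pattern: an ordinary effect-chained load; duplicates become
// candidates for csa load elimination instead of manual caching.
Node* fresh = gasm_->LoadFromObject(MachineType::UintPtr(), instance, offset);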
...@@ -3515,12 +3501,13 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal( ...@@ -3515,12 +3501,13 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
// Load the base from the ImportedMutableGlobalsBuffer of the instance. // Load the base from the ImportedMutableGlobalsBuffer of the instance.
Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers, Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
MachineType::TaggedPointer()); MachineType::TaggedPointer());
*base = LOAD_FIXED_ARRAY_SLOT_ANY(buffers, global.index); *base = gasm_->LoadFixedArrayElementAny(buffers, global.index);
// For the offset we need the index of the global in the buffer, and then // For the offset we need the index of the global in the buffer, and then
// calculate the actual offset from the index. Load the index from the // calculate the actual offset from the index. Load the index from the
// ImportedMutableGlobals array of the instance. // ImportedMutableGlobals array of the instance.
Node* index = gasm_->Load(MachineType::UintPtr(), GetImportedMutableGlobals(), Node* index =
gasm_->LoadFromObject(MachineType::UintPtr(), GetImportedMutableGlobals(),
Int32Constant(global.index * sizeof(Address))); Int32Constant(global.index * sizeof(Address)));
// From the index, calculate the actual offset in the FixedArray. This // From the index, calculate the actual offset in the FixedArray. This
...@@ -3571,7 +3558,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f, ...@@ -3571,7 +3558,7 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
DCHECK_EQ(1, fun->result_size); DCHECK_EQ(1, fun->result_size);
auto centry_id = auto centry_id =
Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit; Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit;
Node* centry_stub = LOAD_FULL_POINTER( Node* centry_stub = gasm_->LoadFullPointer(
isolate_root, IsolateData::builtin_slot_offset(centry_id)); isolate_root, IsolateData::builtin_slot_offset(centry_id));
// TODO(titzer): allow arbitrary number of runtime arguments // TODO(titzer): allow arbitrary number of runtime arguments
// At the moment we only allow 5 parameters. If more parameters are needed, // At the moment we only allow 5 parameters. If more parameters are needed,
...@@ -3608,11 +3595,11 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) { ...@@ -3608,11 +3595,11 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* base = nullptr; Node* base = nullptr;
Node* offset = nullptr; Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset); GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return gasm_->Load(MachineType::AnyTagged(), base, offset); return gasm_->LoadFromObject(MachineType::AnyTagged(), base, offset);
} }
Node* globals_buffer = Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer()); LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
return LOAD_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset); return gasm_->LoadFixedArrayElementAny(globals_buffer, global.offset);
} }
MachineType mem_type = global.type.machine_type(); MachineType mem_type = global.type.machine_type();
...@@ -3622,6 +3609,8 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) { ...@@ -3622,6 +3609,8 @@ Node* WasmGraphBuilder::GlobalGet(uint32_t index) {
Node* base = nullptr; Node* base = nullptr;
Node* offset = nullptr; Node* offset = nullptr;
GetGlobalBaseAndOffset(mem_type, global, &base, &offset); GetGlobalBaseAndOffset(mem_type, global, &base, &offset);
// TODO(manoskouk): Cannot use LoadFromObject here due to
// GetGlobalBaseAndOffset pointer arithmetic.
Node* result = gasm_->Load(mem_type, base, offset); Node* result = gasm_->Load(mem_type, base, offset);
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
result = BuildChangeEndiannessLoad(result, mem_type, global.type); result = BuildChangeEndiannessLoad(result, mem_type, global.type);
...@@ -3637,12 +3626,13 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) { ...@@ -3637,12 +3626,13 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
Node* offset = nullptr; Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset); GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &base, &offset);
return STORE_RAW_NODE_OFFSET( return gasm_->StoreToObject(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier); ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier), base,
offset, val);
} }
Node* globals_buffer = Node* globals_buffer =
LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer()); LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset, val); return gasm_->StoreFixedArrayElementAny(globals_buffer, global.offset, val);
} }
MachineType mem_type = global.type.machine_type(); MachineType mem_type = global.type.machine_type();
...@@ -3657,7 +3647,8 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) { ...@@ -3657,7 +3647,8 @@ Node* WasmGraphBuilder::GlobalSet(uint32_t index, Node* val) {
#if defined(V8_TARGET_BIG_ENDIAN) #if defined(V8_TARGET_BIG_ENDIAN)
val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type); val = BuildChangeEndiannessStore(val, mem_type.representation(), global.type);
#endif #endif
// TODO(manoskouk): Cannot use StoreToObject here due to
// GetGlobalBaseAndOffset pointer arithmetic.
return gasm_->Store(store_rep, base, offset, val); return gasm_->Store(store_rep, base, offset, val);
} }
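The two TODOs above mark the one place where the raw machine Store survives: GetGlobalBaseAndOffset may hand back an untagged address produced by pointer arithmetic, which the object-aware operators cannot take as a base. A sketch of the two store flavors side by side (argument names reused from the surrounding code; the StoreRepresentation construction is assumed):

// Object-aware store: write-barrier information travels in ObjectAccess.
gasm_->StoreToObject(ObjectAccess(MachineType::AnyTagged(), kFullWriteBarrier),
                     base, offset, val);
// Raw machine store: kept where {base} may be an untagged pointer.
gasm_->Store(StoreRepresentation(mem_type.representation(), kNoWriteBarrier),
             base, offset, val);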
...@@ -4499,7 +4490,8 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() { ...@@ -4499,7 +4490,8 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) { void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return; if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(), Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
mcgraph()->zone(), sig, std::move(lowering_special_case_)); gasm_->simplified(), mcgraph()->zone(), sig,
std::move(lowering_special_case_));
r.LowerGraph(); r.LowerGraph();
} }
...@@ -4508,8 +4500,9 @@ void WasmGraphBuilder::LowerInt64(CallOrigin origin) { ...@@ -4508,8 +4500,9 @@ void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
} }
void WasmGraphBuilder::SimdScalarLoweringForTesting() { void WasmGraphBuilder::SimdScalarLoweringForTesting() {
SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_, SimdScalarLowering(
kCalledFromWasm)) mcgraph(), gasm_->simplified(),
CreateMachineSignature(mcgraph()->zone(), sig_, kCalledFromWasm))
.LowerGraph(); .LowerGraph();
} }
...@@ -5424,11 +5417,9 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index, ...@@ -5424,11 +5417,9 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* seg_size_array = Node* seg_size_array =
LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer()); LOAD_INSTANCE_FIELD(DataSegmentSizes, MachineType::Pointer());
STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2); STATIC_ASSERT(wasm::kV8MaxWasmDataSegments <= kMaxUInt32 >> 2);
auto store_rep = auto access = ObjectAccess(MachineType::Int32(), kNoWriteBarrier);
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier); return gasm_->StoreToObject(access, seg_size_array, data_segment_index << 2,
return gasm_->Store(store_rep, seg_size_array, Int32Constant(0));
mcgraph()->IntPtrConstant(data_segment_index << 2),
Int32Constant(0));
} }
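The shifted offset works because each data segment size occupies one uint32 slot, and the STATIC_ASSERT above bounds the segment count so the shift cannot overflow. Worked out (illustrative):

// Segment i lives at byte offset i << 2 == 4 * i; with
// kV8MaxWasmDataSegments <= kMaxUInt32 >> 2, i << 2 always fits in 32 bits.
uint32_t byte_offset = data_segment_index << 2;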
Node* WasmGraphBuilder::StoreArgsInStackSlot( Node* WasmGraphBuilder::StoreArgsInStackSlot(
...@@ -5528,11 +5519,11 @@ Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value, ...@@ -5528,11 +5519,11 @@ Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
Node* WasmGraphBuilder::TableSize(uint32_t table_index) { Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer()); Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer());
Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index); Node* table = gasm_->LoadFixedArrayElementAny(tables, table_index);
int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd - int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
WasmTableObject::kCurrentLengthOffset + 1; WasmTableObject::kCurrentLengthOffset + 1;
Node* length_smi = gasm_->Load( Node* length_smi = gasm_->LoadFromObject(
assert_size(length_field_size, MachineType::TaggedSigned()), table, assert_size(length_field_size, MachineType::TaggedSigned()), table,
wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset)); wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset));
...@@ -5585,8 +5576,8 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index, ...@@ -5585,8 +5576,8 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* offset = loop.PhiAt(0); Node* offset = loop.PhiAt(0);
Node* check = gasm_->Uint32LessThan(offset, end_offset); Node* check = gasm_->Uint32LessThan(offset, end_offset);
gasm_->GotoIfNot(check, &done); gasm_->GotoIfNot(check, &done);
gasm_->StoreWithTaggedAlignment(a, offset, initial_value, gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()), a,
type->element_type()); offset, initial_value);
offset = gasm_->Int32Add(offset, element_size); offset = gasm_->Int32Add(offset, element_size);
gasm_->Goto(&loop, offset); gasm_->Goto(&loop, offset);
} }
...@@ -5597,7 +5588,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index, ...@@ -5597,7 +5588,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) { Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list = Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer()); LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
return LOAD_FIXED_ARRAY_SLOT_PTR(maps_list, type_index); return gasm_->LoadFixedArrayElementPtr(maps_list, type_index);
} }
Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt) { Node* WasmGraphBuilder::RttSub(uint32_t type_index, Node* parent_rtt) {
...@@ -5882,10 +5873,12 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object, ...@@ -5882,10 +5873,12 @@ Node* WasmGraphBuilder::StructGet(Node* struct_object,
TrapIfTrue(wasm::kTrapNullDereference, TrapIfTrue(wasm::kTrapNullDereference,
gasm_->WordEqual(struct_object, RefNull()), position); gasm_->WordEqual(struct_object, RefNull()), position);
} }
MachineType machine_type = // It is not enough to invoke ValueType::machine_type(), because the
gasm_->FieldType(struct_type, field_index, is_signed); // signedness has to be determined by {is_signed}.
MachineType machine_type = MachineType::TypeForRepresentation(
struct_type->field(field_index).machine_representation(), is_signed);
Node* offset = gasm_->FieldOffset(struct_type, field_index); Node* offset = gasm_->FieldOffset(struct_type, field_index);
return gasm_->LoadWithTaggedAlignment(machine_type, struct_object, offset); return gasm_->LoadFromObject(machine_type, struct_object, offset);
} }
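A quick illustration of why {is_signed} has to flow into the machine type: for a packed i8 field, struct.get_s and struct.get_u must produce different loads. The values in the comments are what TypeForRepresentation is expected to return:

MachineType s = MachineType::TypeForRepresentation(
    MachineRepresentation::kWord8, /*is_signed=*/true);   // MachineType::Int8()
MachineType u = MachineType::TypeForRepresentation(
    MachineRepresentation::kWord8, /*is_signed=*/false);  // MachineType::Uint8()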
Node* WasmGraphBuilder::StructSet(Node* struct_object, Node* WasmGraphBuilder::StructSet(Node* struct_object,
...@@ -5920,7 +5913,7 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object, ...@@ -5920,7 +5913,7 @@ Node* WasmGraphBuilder::ArrayGet(Node* array_object,
MachineType machine_type = MachineType::TypeForRepresentation( MachineType machine_type = MachineType::TypeForRepresentation(
type->element_type().machine_representation(), is_signed); type->element_type().machine_representation(), is_signed);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type()); Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
return gasm_->LoadWithTaggedAlignment(machine_type, array_object, offset); return gasm_->LoadFromObject(machine_type, array_object, offset);
} }
Node* WasmGraphBuilder::ArraySet(Node* array_object, Node* WasmGraphBuilder::ArraySet(Node* array_object,
...@@ -5933,8 +5926,8 @@ Node* WasmGraphBuilder::ArraySet(Node* array_object, ...@@ -5933,8 +5926,8 @@ Node* WasmGraphBuilder::ArraySet(Node* array_object,
} }
BoundsCheck(array_object, index, position); BoundsCheck(array_object, index, position);
Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type()); Node* offset = gasm_->WasmArrayElementOffset(index, type->element_type());
return gasm_->StoreWithTaggedAlignment(array_object, offset, value, return gasm_->StoreToObject(ObjectAccessForGCStores(type->element_type()),
type->element_type()); array_object, offset, value);
} }
Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check, Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
...@@ -6056,15 +6049,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6056,15 +6049,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} }
Node* BuildLoadUndefinedValueFromInstance() { Node* BuildLoadUndefinedValueFromInstance() {
if (undefined_value_node_ == nullptr) { return gasm_->LoadFromObject(MachineType::Pointer(), BuildLoadIsolateRoot(),
Node* isolate_root = Int32Constant(IsolateData::root_slot_offset(
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer()); RootIndex::kUndefinedValue)));
undefined_value_node_ = gasm_->Load(
MachineType::Pointer(), isolate_root,
Int32Constant(
IsolateData::root_slot_offset(RootIndex::kUndefinedValue)));
}
return undefined_value_node_.get();
} }
Node* BuildChangeInt32ToNumber(Node* value) { Node* BuildChangeInt32ToNumber(Node* value) {
...@@ -6273,9 +6260,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6273,9 +6260,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildUnpackObjectWrapper(Node* input, UnpackFailureBehavior failure) { Node* BuildUnpackObjectWrapper(Node* input, UnpackFailureBehavior failure) {
Node* obj = gasm_->CallBuiltin( Node* obj = gasm_->CallBuiltin(
Builtins::kWasmGetOwnProperty, input, Builtins::kWasmGetOwnProperty, input,
LOAD_FULL_POINTER(BuildLoadIsolateRoot(), gasm_->LoadFullPointer(BuildLoadIsolateRoot(),
IsolateData::root_slot_offset( IsolateData::root_slot_offset(
RootIndex::kwasm_wrapped_object_symbol)), RootIndex::kwasm_wrapped_object_symbol)),
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer())); LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
// Invalid object wrappers (i.e. any other JS object that doesn't have the // Invalid object wrappers (i.e. any other JS object that doesn't have the
// magic hidden property) will return {undefined}. Map that to {null} or // magic hidden property) will return {undefined}. Map that to {null} or
...@@ -6418,8 +6405,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6418,8 +6405,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
} }
Node* HeapNumberToFloat64(Node* input) { Node* HeapNumberToFloat64(Node* input) {
return gasm_->Load(MachineType::Float64(), input, return gasm_->LoadFromObject(
wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset)); MachineType::Float64(), input,
wasm::ObjectAccess::ToTagged(HeapNumber::kValueOffset));
} }
Node* FromJSFast(Node* input, wasm::ValueType type) { Node* FromJSFast(Node* input, wasm::ValueType type) {
...@@ -6466,8 +6454,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6466,8 +6454,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address, void BuildModifyThreadInWasmFlagHelper(Node* thread_in_wasm_flag_address,
bool new_value) { bool new_value) {
if (FLAG_debug_code) { if (FLAG_debug_code) {
Node* flag_value = Node* flag_value = gasm_->LoadFromObject(MachineType::Pointer(),
gasm_->Load(MachineType::Pointer(), thread_in_wasm_flag_address, 0); thread_in_wasm_flag_address, 0);
Node* check = Node* check =
gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1)); gasm_->Word32Equal(flag_value, Int32Constant(new_value ? 0 : 1));
...@@ -6487,9 +6475,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6487,9 +6475,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
flag_check.merge); flag_check.merge);
} }
gasm_->Store( gasm_->StoreToObject(ObjectAccess(MachineType::Int32(), kNoWriteBarrier),
StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier), thread_in_wasm_flag_address, 0,
thread_in_wasm_flag_address, 0, Int32Constant(new_value ? 1 : 0)); Int32Constant(new_value ? 1 : 0));
} }
void BuildModifyThreadInWasmFlag(bool new_value) { void BuildModifyThreadInWasmFlag(bool new_value) {
...@@ -6497,8 +6485,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6497,8 +6485,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = BuildLoadIsolateRoot(); Node* isolate_root = BuildLoadIsolateRoot();
Node* thread_in_wasm_flag_address = Node* thread_in_wasm_flag_address =
gasm_->Load(MachineType::Pointer(), isolate_root, gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
Isolate::thread_in_wasm_flag_address_offset()); Isolate::thread_in_wasm_flag_address_offset());
BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value); BuildModifyThreadInWasmFlagHelper(thread_in_wasm_flag_address, new_value);
} }
...@@ -6513,8 +6501,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6513,8 +6501,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot(); Node* isolate_root = wasm_wrapper_graph_builder_->BuildLoadIsolateRoot();
thread_in_wasm_flag_address_ = thread_in_wasm_flag_address_ =
gasm->Load(MachineType::Pointer(), isolate_root, gasm->LoadFromObject(MachineType::Pointer(), isolate_root,
Isolate::thread_in_wasm_flag_address_offset()); Isolate::thread_in_wasm_flag_address_offset());
wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper( wasm_wrapper_graph_builder_->BuildModifyThreadInWasmFlagHelper(
thread_in_wasm_flag_address_, true); thread_in_wasm_flag_address_, true);
...@@ -6589,13 +6577,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6589,13 +6577,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* jsval; Node* jsval;
if (sig_->return_count() == 0) { if (sig_->return_count() == 0) {
// We do not use {BuildLoadUndefinedValueFromInstance} here because it jsval = BuildLoadUndefinedValueFromInstance();
// would create an invalid graph.
Node* isolate_root =
LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
jsval = gasm_->Load(
MachineType::Pointer(), isolate_root,
IsolateData::root_slot_offset(RootIndex::kUndefinedValue));
} else if (sig_->return_count() == 1) { } else if (sig_->return_count() == 1) {
jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion() jsval = js_wasm_call_data && !js_wasm_call_data->result_needs_conversion()
? rets[0] ? rets[0]
...@@ -6610,7 +6592,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6610,7 +6592,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = 0; i < return_count; ++i) { for (int i = 0; i < return_count; ++i) {
Node* value = ToJS(rets[i], sig_->GetReturn(i)); Node* value = ToJS(rets[i], sig_->GetReturn(i));
STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value); gasm_->StoreFixedArrayElementAny(fixed_array, i, value);
} }
} }
return jsval; return jsval;
...@@ -6660,10 +6642,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6660,10 +6642,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kF64: { case wasm::kF64: {
auto done = gasm_->MakeLabel(); auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done); gasm_->GotoIf(IsSmi(input), &done);
Node* map = Node* map = gasm_->LoadFromObject(
gasm_->Load(MachineType::TaggedPointer(), input, MachineType::TaggedPointer(), input,
wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset)); wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
Node* heap_number_map = LOAD_FULL_POINTER( Node* heap_number_map = gasm_->LoadFullPointer(
BuildLoadIsolateRoot(), BuildLoadIsolateRoot(),
IsolateData::root_slot_offset(RootIndex::kHeapNumberMap)); IsolateData::root_slot_offset(RootIndex::kHeapNumberMap));
Node* is_heap_number = gasm_->WordEqual(heap_number_map, map); Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
...@@ -6799,9 +6781,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6799,9 +6781,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* undefined_node) { Node* undefined_node) {
// Check function strict bit. // Check function strict bit.
Node* shared_function_info = gasm_->LoadSharedFunctionInfo(callable_node); Node* shared_function_info = gasm_->LoadSharedFunctionInfo(callable_node);
Node* flags = Node* flags = gasm_->LoadFromObject(
gasm_->Load(MachineType::Int32(), shared_function_info, MachineType::Int32(), shared_function_info,
wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo()); wasm::ObjectAccess::FlagsOffsetInSharedFunctionInfo());
Node* strict_check = Node* strict_check =
Binop(wasm::kExprI32And, flags, Binop(wasm::kExprI32And, flags,
Int32Constant(SharedFunctionInfo::IsNativeBit::kMask | Int32Constant(SharedFunctionInfo::IsNativeBit::kMask |
...@@ -6812,8 +6794,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6812,8 +6794,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BranchHint::kNone); BranchHint::kNone);
Node* old_effect = effect(); Node* old_effect = effect();
SetControl(strict_d.if_false); SetControl(strict_d.if_false);
Node* global_proxy = Node* global_proxy = gasm_->LoadFixedArrayElementPtr(
LOAD_FIXED_ARRAY_SLOT_PTR(native_context, Context::GLOBAL_PROXY_INDEX); native_context, Context::GLOBAL_PROXY_INDEX);
SetEffectControl(strict_d.EffectPhi(old_effect, global_proxy), SetEffectControl(strict_d.EffectPhi(old_effect, global_proxy),
strict_d.merge); strict_d.merge);
return strict_d.Phi(MachineRepresentation::kTagged, undefined_node, return strict_d.Phi(MachineRepresentation::kTagged, undefined_node,
...@@ -6967,7 +6949,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -6967,7 +6949,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context); BuildMultiReturnFixedArrayFromIterable(sig_, call, native_context);
base::SmallVector<Node*, 8> wasm_values(sig_->return_count()); base::SmallVector<Node*, 8> wasm_values(sig_->return_count());
for (unsigned i = 0; i < sig_->return_count(); ++i) { for (unsigned i = 0; i < sig_->return_count(); ++i) {
wasm_values[i] = FromJS(LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i), wasm_values[i] = FromJS(gasm_->LoadFixedArrayElementAny(fixed_array, i),
native_context, sig_->GetReturn(i)); native_context, sig_->GetReturn(i));
} }
BuildModifyThreadInWasmFlag(true); BuildModifyThreadInWasmFlag(true);
...@@ -7018,8 +7000,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7018,8 +7000,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildModifyThreadInWasmFlag(false); BuildModifyThreadInWasmFlag(false);
Node* isolate_root = BuildLoadIsolateRoot(); Node* isolate_root = BuildLoadIsolateRoot();
Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer()); Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
STORE_RAW(isolate_root, Isolate::c_entry_fp_offset(), fp_value, gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
MachineType::PointerRepresentation(), kNoWriteBarrier); kNoWriteBarrier),
isolate_root, Isolate::c_entry_fp_offset(), fp_value);
// TODO(jkummerow): Load the address from the {host_data}, and cache // TODO(jkummerow): Load the address from the {host_data}, and cache
// wrappers per signature. // wrappers per signature.
...@@ -7075,6 +7058,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7075,6 +7058,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm); if (ContainsInt64(sig_)) LowerInt64(kCalledFromWasm);
} }
// TODO(manoskouk): Improve this wrapper to directly use HeapConstants instead
// of loading from the isolate root.
void BuildJSToJSWrapper(Isolate* isolate) { void BuildJSToJSWrapper(Isolate* isolate) {
int wasm_count = static_cast<int>(sig_->parameter_count()); int wasm_count = static_cast<int>(sig_->parameter_count());
...@@ -7088,8 +7073,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7088,8 +7073,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Since JS-to-JS wrappers are specific to one Isolate, it is OK to embed // Since JS-to-JS wrappers are specific to one Isolate, it is OK to embed
// values (for undefined and root) directly into the instruction stream. // values (for undefined and root) directly into the instruction stream.
isolate_root_node_ = mcgraph()->IntPtrConstant(isolate->isolate_root()); isolate_root_node_ = mcgraph()->IntPtrConstant(isolate->isolate_root());
undefined_value_node_ = graph()->NewNode(mcgraph()->common()->HeapConstant(
isolate->factory()->undefined_value()));
// Throw a TypeError if the signature is incompatible with JavaScript. // Throw a TypeError if the signature is incompatible with JavaScript.
if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) { if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
...@@ -7101,7 +7084,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7101,7 +7084,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load the original callable from the closure. // Load the original callable from the closure.
Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure); Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure);
Node* callable = LOAD_TAGGED_ANY( Node* callable = gasm_->LoadAnyTagged(
func_data, func_data,
wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset)); wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
...@@ -7147,9 +7130,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7147,9 +7130,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval); Node* result_fixed_array = gasm_->LoadJSArrayElements(jsval);
for (unsigned i = 0; i < sig_->return_count(); ++i) { for (unsigned i = 0; i < sig_->return_count(); ++i) {
const auto& type = sig_->GetReturn(i); const auto& type = sig_->GetReturn(i);
Node* elem = LOAD_FIXED_ARRAY_SLOT_ANY(fixed_array, i); Node* elem = gasm_->LoadFixedArrayElementAny(fixed_array, i);
Node* cast = ToJS(FromJS(elem, context, type), type); Node* cast = ToJS(FromJS(elem, context, type), type);
STORE_FIXED_ARRAY_SLOT_ANY(result_fixed_array, i, cast); gasm_->StoreFixedArrayElementAny(result_fixed_array, i, cast);
} }
} }
Return(jsval); Return(jsval);
...@@ -7165,9 +7148,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7165,9 +7148,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp); Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp);
Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer()); Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset, gasm_->Store(StoreRepresentation(MachineType::PointerRepresentation(),
c_entry_fp, MachineType::PointerRepresentation(), kNoWriteBarrier),
kNoWriteBarrier); fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
c_entry_fp);
int wasm_arg_count = static_cast<int>(sig_->parameter_count()); int wasm_arg_count = static_cast<int>(sig_->parameter_count());
base::SmallVector<Node*, 16> args(wasm_arg_count + 4); base::SmallVector<Node*, 16> args(wasm_arg_count + 4);
...@@ -7233,7 +7217,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7233,7 +7217,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}; };
Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps); Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), mcgraph()->zone(), &c_entry_sig); mcgraph()->common(), gasm_->simplified(),
mcgraph()->zone(), &c_entry_sig);
r.LowerGraph(); r.LowerGraph();
} }
} }
...@@ -7241,7 +7226,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { ...@@ -7241,7 +7226,6 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
private: private:
const wasm::WasmModule* module_; const wasm::WasmModule* module_;
StubCallMode stub_mode_; StubCallMode stub_mode_;
SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> int32_to_heapnumber_operator_; SetOncePointer<const Operator> int32_to_heapnumber_operator_;
SetOncePointer<const Operator> tagged_non_smi_to_int32_operator_; SetOncePointer<const Operator> tagged_non_smi_to_int32_operator_;
SetOncePointer<const Operator> float32_to_number_operator_; SetOncePointer<const Operator> float32_to_number_operator_;
...@@ -7821,7 +7805,8 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator, ...@@ -7821,7 +7805,8 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
WasmGraphBuilder::kCalledFromWasm); WasmGraphBuilder::kCalledFromWasm);
if (builder.has_simd() && if (builder.has_simd() &&
(!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) { (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
SimdScalarLowering(mcgraph, sig).LowerGraph(); SimplifiedOperatorBuilder simplified(mcgraph->zone());
SimdScalarLowering(mcgraph, &simplified, sig).LowerGraph();
// SimdScalarLowering changes all v128 to 4 i32, so update the machine // SimdScalarLowering changes all v128 to 4 i32, so update the machine
// signature for the call to LowerInt64. // signature for the call to LowerInt64.
...@@ -8207,18 +8192,7 @@ AssemblerOptions WasmStubAssemblerOptions() { ...@@ -8207,18 +8192,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
#undef FATAL_UNSUPPORTED_OPCODE #undef FATAL_UNSUPPORTED_OPCODE
#undef WASM_INSTANCE_OBJECT_SIZE #undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_INSTANCE_FIELD #undef LOAD_INSTANCE_FIELD
#undef LOAD_TAGGED_POINTER
#undef LOAD_TAGGED_ANY
#undef LOAD_FIXED_ARRAY_SLOT
#undef LOAD_FIXED_ARRAY_SLOT_SMI
#undef LOAD_FIXED_ARRAY_SLOT_PTR
#undef LOAD_FIXED_ARRAY_SLOT_ANY
#undef STORE_RAW
#undef STORE_RAW_NODE_OFFSET
#undef STORE_FIXED_ARRAY_SLOT_SMI
#undef STORE_FIXED_ARRAY_SLOT_ANY
} // namespace compiler } // namespace compiler
} // namespace internal } // namespace internal
......
...@@ -726,9 +726,6 @@ class WasmGraphBuilder { ...@@ -726,9 +726,6 @@ class WasmGraphBuilder {
WasmInstanceCacheNodes* instance_cache_ = nullptr; WasmInstanceCacheNodes* instance_cache_ = nullptr;
SetOncePointer<Node> instance_node_; SetOncePointer<Node> instance_node_;
SetOncePointer<Node> ref_null_node_;
SetOncePointer<Node> globals_start_;
SetOncePointer<Node> imported_mutable_globals_;
SetOncePointer<Node> stack_check_code_node_; SetOncePointer<Node> stack_check_code_node_;
SetOncePointer<Node> isolate_root_node_; SetOncePointer<Node> isolate_root_node_;
SetOncePointer<const Operator> stack_check_call_operator_; SetOncePointer<const Operator> stack_check_call_operator_;
......
...@@ -903,6 +903,7 @@ class RuntimeCallTimer final { ...@@ -903,6 +903,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
...@@ -914,24 +915,21 @@ class RuntimeCallTimer final { ...@@ -914,24 +915,21 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
...@@ -953,7 +951,8 @@ class RuntimeCallTimer final { ...@@ -953,7 +951,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmFullOptimization) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
\ \
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
......
...@@ -484,8 +484,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() { ...@@ -484,8 +484,8 @@ Handle<Code> WasmFunctionWrapper::GetWrapperCode() {
for (size_t i = 0; i < num_params + 1; i++) { for (size_t i = 0; i < num_params + 1; i++) {
rep_builder.AddParam(MachineRepresentation::kWord32); rep_builder.AddParam(MachineRepresentation::kWord32);
} }
compiler::Int64Lowering r(graph(), machine(), common(), zone(), compiler::Int64Lowering r(graph(), machine(), common(), simplified(),
rep_builder.Build()); zone(), rep_builder.Build());
r.LowerGraph(); r.LowerGraph();
} }
......
...@@ -32,13 +32,15 @@ class Int64LoweringTest : public GraphTest { ...@@ -32,13 +32,15 @@ class Int64LoweringTest : public GraphTest {
Int64LoweringTest() Int64LoweringTest()
: GraphTest(), : GraphTest(),
machine_(zone(), MachineRepresentation::kWord32, machine_(zone(), MachineRepresentation::kWord32,
MachineOperatorBuilder::Flag::kAllOptionalOps) { MachineOperatorBuilder::Flag::kAllOptionalOps),
simplified_(zone()) {
value_[0] = 0x1234567890ABCDEF; value_[0] = 0x1234567890ABCDEF;
value_[1] = 0x1EDCBA098765432F; value_[1] = 0x1EDCBA098765432F;
value_[2] = 0x1133557799886644; value_[2] = 0x1133557799886644;
} }
MachineOperatorBuilder* machine() { return &machine_; } MachineOperatorBuilder* machine() { return &machine_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) { void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
Node* zero = graph()->NewNode(common()->Int32Constant(0)); Node* zero = graph()->NewNode(common()->Int32Constant(0));
...@@ -46,7 +48,8 @@ class Int64LoweringTest : public GraphTest { ...@@ -46,7 +48,8 @@ class Int64LoweringTest : public GraphTest {
graph()->start(), graph()->start()); graph()->start(), graph()->start());
NodeProperties::MergeControlToEnd(graph(), common(), ret); NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), zone(), signature); Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
signature);
lowering.LowerGraph(); lowering.LowerGraph();
} }
...@@ -64,7 +67,7 @@ class Int64LoweringTest : public GraphTest { ...@@ -64,7 +67,7 @@ class Int64LoweringTest : public GraphTest {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
sig_builder.AddReturn(rep); sig_builder.AddReturn(rep);
Int64Lowering lowering(graph(), machine(), common(), zone(), Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build(), std::move(special_case)); sig_builder.Build(), std::move(special_case));
lowering.LowerGraph(); lowering.LowerGraph();
} }
...@@ -134,6 +137,7 @@ class Int64LoweringTest : public GraphTest { ...@@ -134,6 +137,7 @@ class Int64LoweringTest : public GraphTest {
private: private:
MachineOperatorBuilder machine_; MachineOperatorBuilder machine_;
SimplifiedOperatorBuilder simplified_;
int64_t value_[3]; int64_t value_[3];
}; };
...@@ -177,22 +181,29 @@ TEST_F(Int64LoweringTest, Int64Constant) { ...@@ -177,22 +181,29 @@ TEST_F(Int64LoweringTest, Int64Constant) {
start())); start()));
#endif #endif
#define INT64_LOAD_LOWERING(kLoad) \ #define INT64_LOAD_LOWERING(kLoad, param, builder) \
int32_t base = 0x1234; \ int32_t base = 0x1234; \
int32_t index = 0x5678; \ int32_t index = 0x5678; \
\ \
LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \ LowerGraph(graph()->NewNode(builder()->kLoad(param), Int32Constant(base), \
Int32Constant(base), Int32Constant(index), \ Int32Constant(index), start(), start()), \
start(), start()), \ MachineRepresentation::kWord64); \
MachineRepresentation::kWord64); \ \
\ Capture<Node*> high_word_load; \
Capture<Node*> high_word_load; \
LOAD_VERIFY(kLoad) LOAD_VERIFY(kLoad)
TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); } TEST_F(Int64LoweringTest, Int64Load) {
INT64_LOAD_LOWERING(Load, MachineType::Int64(), machine);
}
TEST_F(Int64LoweringTest, UnalignedInt64Load) { TEST_F(Int64LoweringTest, UnalignedInt64Load) {
INT64_LOAD_LOWERING(UnalignedLoad); INT64_LOAD_LOWERING(UnalignedLoad, MachineType::Int64(), machine);
}
TEST_F(Int64LoweringTest, Int64LoadFromObject) {
INT64_LOAD_LOWERING(LoadFromObject,
ObjectAccess(MachineType::Int64(), kNoWriteBarrier),
simplified);
} }
#if defined(V8_TARGET_LITTLE_ENDIAN) #if defined(V8_TARGET_LITTLE_ENDIAN)
...@@ -225,7 +236,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) { ...@@ -225,7 +236,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
start())); start()));
#endif #endif
#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \ #define INT64_STORE_LOWERING(kStore, kRep32, kRep64, builder) \
int32_t base = 1111; \ int32_t base = 1111; \
int32_t index = 2222; \ int32_t index = 2222; \
int32_t return_value = 0x5555; \ int32_t return_value = 0x5555; \
...@@ -233,7 +244,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) { ...@@ -233,7 +244,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
sig_builder.AddReturn(MachineRepresentation::kWord32); \ sig_builder.AddReturn(MachineRepresentation::kWord32); \
\ \
Node* store = graph()->NewNode(machine()->kStore(kRep64), \ Node* store = graph()->NewNode(builder()->kStore(kRep64), \
Int32Constant(base), Int32Constant(index), \ Int32Constant(base), Int32Constant(index), \
Int64Constant(value(0)), start(), start()); \ Int64Constant(value(0)), start(), start()); \
\ \
...@@ -243,7 +254,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) { ...@@ -243,7 +254,7 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
\ \
NodeProperties::MergeControlToEnd(graph(), common(), ret); \ NodeProperties::MergeControlToEnd(graph(), common(), ret); \
\ \
Int64Lowering lowering(graph(), machine(), common(), zone(), \ Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(), \
sig_builder.Build()); \ sig_builder.Build()); \
lowering.LowerGraph(); \ lowering.LowerGraph(); \
\ \
...@@ -254,7 +265,7 @@ TEST_F(Int64LoweringTest, Int64Store) { ...@@ -254,7 +265,7 @@ TEST_F(Int64LoweringTest, Int64Store) {
WriteBarrierKind::kNoWriteBarrier); WriteBarrierKind::kNoWriteBarrier);
const StoreRepresentation rep32(MachineRepresentation::kWord32, const StoreRepresentation rep32(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier); WriteBarrierKind::kNoWriteBarrier);
INT64_STORE_LOWERING(Store, rep32, rep64); INT64_STORE_LOWERING(Store, rep32, rep64, machine);
} }
TEST_F(Int64LoweringTest, Int32Store) { TEST_F(Int64LoweringTest, Int32Store) {
...@@ -277,7 +288,7 @@ TEST_F(Int64LoweringTest, Int32Store) { ...@@ -277,7 +288,7 @@ TEST_F(Int64LoweringTest, Int32Store) {
NodeProperties::MergeControlToEnd(graph(), common(), ret); NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), zone(), Int64Lowering lowering(graph(), machine(), common(), simplified(), zone(),
sig_builder.Build()); sig_builder.Build());
lowering.LowerGraph(); lowering.LowerGraph();
...@@ -292,7 +303,13 @@ TEST_F(Int64LoweringTest, Int32Store) { ...@@ -292,7 +303,13 @@ TEST_F(Int64LoweringTest, Int32Store) {
TEST_F(Int64LoweringTest, Int64UnalignedStore) { TEST_F(Int64LoweringTest, Int64UnalignedStore) {
const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64); const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32); const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
INT64_STORE_LOWERING(UnalignedStore, rep32, rep64); INT64_STORE_LOWERING(UnalignedStore, rep32, rep64, machine);
}
TEST_F(Int64LoweringTest, Int64StoreToObject) {
const ObjectAccess access64(MachineType::Int64(), kNoWriteBarrier);
const ObjectAccess access32(MachineType::Int32(), kNoWriteBarrier);
INT64_STORE_LOWERING(StoreToObject, access32, access64, simplified);
} }
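The store-side test mirrors the load-side one: the 64-bit StoreToObject node is built with the simplified operator builder and lowered. On little-endian targets the lowering is expected to produce a pair of 32-bit stores, the low word at the original index and the high word four bytes above it, which the macro's verification step checks. Sketch of the node under test:

Node* store = graph()->NewNode(
    simplified()->StoreToObject(
        ObjectAccess(MachineType::Int64(), kNoWriteBarrier)),
    Int32Constant(1111), Int32Constant(2222), Int64Constant(value(0)),
    start(), start());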
TEST_F(Int64LoweringTest, Int64And) { TEST_F(Int64LoweringTest, Int64And) {
......
...@@ -1153,10 +1153,10 @@ LOAD_MATCHER(UnalignedLoad) ...@@ -1153,10 +1153,10 @@ LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad) LOAD_MATCHER(PoisonedLoad)
LOAD_MATCHER(LoadFromObject) LOAD_MATCHER(LoadFromObject)
#define STORE_MATCHER(kStore) \ #define STORE_MATCHER(kStore, representation) \
class Is##kStore##Matcher final : public TestNodeMatcher { \ class Is##kStore##Matcher final : public TestNodeMatcher { \
public: \ public: \
Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \ Is##kStore##Matcher(const Matcher<representation>& rep_matcher, \
const Matcher<Node*>& base_matcher, \ const Matcher<Node*>& base_matcher, \
const Matcher<Node*>& index_matcher, \ const Matcher<Node*>& index_matcher, \
const Matcher<Node*>& value_matcher, \ const Matcher<Node*>& value_matcher, \
...@@ -1198,9 +1198,8 @@ LOAD_MATCHER(LoadFromObject) ...@@ -1198,9 +1198,8 @@ LOAD_MATCHER(LoadFromObject)
control_node = NodeProperties::GetControlInput(node); \ control_node = NodeProperties::GetControlInput(node); \
} \ } \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \ return (TestNodeMatcher::MatchAndExplain(node, listener) && \
PrintMatchAndExplain( \ PrintMatchAndExplain(OpParameter<representation>(node->op()), \
OpParameter<kStore##Representation>(node->op()), "rep", \ "rep", rep_matcher_, listener) && \
rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \ "base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
...@@ -1214,7 +1213,7 @@ LOAD_MATCHER(LoadFromObject) ...@@ -1214,7 +1213,7 @@ LOAD_MATCHER(LoadFromObject)
} \ } \
\ \
private: \ private: \
const Matcher<kStore##Representation> rep_matcher_; \ const Matcher<representation> rep_matcher_; \
const Matcher<Node*> base_matcher_; \ const Matcher<Node*> base_matcher_; \
const Matcher<Node*> index_matcher_; \ const Matcher<Node*> index_matcher_; \
const Matcher<Node*> value_matcher_; \ const Matcher<Node*> value_matcher_; \
...@@ -1222,8 +1221,9 @@ LOAD_MATCHER(LoadFromObject) ...@@ -1222,8 +1221,9 @@ LOAD_MATCHER(LoadFromObject)
const Matcher<Node*> control_matcher_; \ const Matcher<Node*> control_matcher_; \
}; };
STORE_MATCHER(Store) STORE_MATCHER(Store, StoreRepresentation)
STORE_MATCHER(UnalignedStore) STORE_MATCHER(UnalignedStore, UnalignedStoreRepresentation)
STORE_MATCHER(StoreToObject, ObjectAccess)
class IsStackSlotMatcher final : public TestNodeMatcher { class IsStackSlotMatcher final : public TestNodeMatcher {
public: public:
...@@ -2117,6 +2117,17 @@ Matcher<Node*> IsUnalignedStore( ...@@ -2117,6 +2117,17 @@ Matcher<Node*> IsUnalignedStore(
control_matcher)); control_matcher));
} }
Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsStoreToObjectMatcher(
rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
control_matcher));
}
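A hypothetical usage of the new matcher in a unittest expectation (all arguments assumed, mirroring how IsStore/IsUnalignedStore are used):

EXPECT_THAT(store,
            IsStoreToObject(
                ObjectAccess(MachineType::Int32(), kNoWriteBarrier),
                IsInt32Constant(base), IsInt32Constant(index),
                IsInt32Constant(value), start(), start()));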
Matcher<Node*> IsStackSlot( Matcher<Node*> IsStackSlot(
const Matcher<StackSlotRepresentation>& rep_matcher) { const Matcher<StackSlotRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher)); return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
......
...@@ -354,6 +354,12 @@ Matcher<Node*> IsUnalignedStore( ...@@ -354,6 +354,12 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher, const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher, const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher); const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStoreToObject(const Matcher<ObjectAccess>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher); Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher); Matcher<Node*> IsWord32Popcnt(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher, Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
......