Commit b659a0ac authored by bmeurer, committed by Commit bot

[turbofan] Add support for copy-on-write element stores.

This extends JSNativeContextSpecialization with support for stores to
fast object/smi element backing stores that are marked as copy-on-write.
In that case we first call the CopyFixedArray builtin to take a copy of
the elements backing store, then store the new elements back into the
object, and finally perform the actual element store.

R=epertoso@chromium.org
BUG=v8:4470

Committed: https://crrev.com/ac98ad22f049a59c48387f1bab1590f135d219c6
Review-Url: https://codereview.chromium.org/2218703003
Cr-Original-Commit-Position: refs/heads/master@{#38370}
Cr-Commit-Position: refs/heads/master@{#38392}
parent e144335f
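
The protocol described in the commit message is small enough to sketch in isolation. The following standalone C++ sketch mirrors it with hypothetical stand-in types (BackingStore, HeapObject); it illustrates the copy-on-write discipline only and is not V8 code:

#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical stand-ins for V8's JSObject and FixedArray; illustration only.
struct BackingStore {
  bool copy_on_write;      // models the COW FixedArray map in V8
  std::vector<int> slots;  // models the tagged element slots
};

struct HeapObject {
  std::shared_ptr<BackingStore> elements;
};

// The commit's store protocol: if the backing store is copy-on-write, take a
// copy (the CopyFixedArray analogue), store the copy back into the object,
// and only then perform the actual element store.
void StoreElement(HeapObject* object, std::size_t index, int value) {
  if (object->elements->copy_on_write) {
    auto copy = std::make_shared<BackingStore>(*object->elements);
    copy->copy_on_write = false;
    object->elements = std::move(copy);    // store the new elements back
  }
  object->elements->slots[index] = value;  // the actual element store
}

int main() {
  auto shared = std::make_shared<BackingStore>(BackingStore{true, {1, 2, 3}});
  HeapObject a{shared};
  HeapObject b{shared};                      // two objects sharing one COW store
  StoreElement(&a, 0, 42);                   // copies before writing
  return b.elements->slots[0] == 1 ? 0 : 1;  // b is unaffected by the store
}

In the real implementation, the role of the copy_on_write flag here is played by the elements map: a COW FixedArray is identified by its map, and taking a copy produces a regular FixedArray.
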
@@ -4,7 +4,7 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -50,5 +50,132 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
masm->TailCallRuntime(Runtime::kStackGuard);
}
// -----------------------------------------------------------------------------
// FixedArray helpers.
void Builtins::Generate_CopyFixedArray(CodeStubAssembler* assembler) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
typedef CopyFixedArrayDescriptor Descriptor;
Node* source = assembler->Parameter(Descriptor::kSource);
// Load the {source} length.
Node* source_length_tagged =
assembler->LoadObjectField(source, FixedArray::kLengthOffset);
Node* source_length = assembler->SmiToWord(source_length_tagged);
// Compute the size of {source} in bytes.
Node* source_size = assembler->IntPtrAdd(
assembler->WordShl(source_length,
assembler->IntPtrConstant(kPointerSizeLog2)),
assembler->IntPtrConstant(FixedArray::kHeaderSize));
// Check if we can allocate in new space.
Label if_newspace(assembler), if_oldspace(assembler);
assembler->Branch(assembler->UintPtrLessThan(
source_size, assembler->IntPtrConstant(
Page::kMaxRegularHeapObjectSize)),
&if_newspace, &if_oldspace);
assembler->Bind(&if_newspace);
{
// Allocate the target FixedArray in new space.
Node* target = assembler->Allocate(source_size);
assembler->StoreMapNoWriteBarrier(
target, assembler->LoadRoot(Heap::kFixedArrayMapRootIndex));
assembler->StoreObjectFieldNoWriteBarrier(target, FixedArray::kLengthOffset,
source_length_tagged);
// Compute the limit.
Node* limit = assembler->IntPtrSub(
source_size, assembler->IntPtrConstant(kHeapObjectTag));
// Copy the {source} to the {target}.
Variable var_offset(assembler, MachineType::PointerRepresentation());
Label loop(assembler, &var_offset), done_loop(assembler);
var_offset.Bind(
assembler->IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Determine the current {offset}.
Node* offset = var_offset.value();
// Check if we are done.
assembler->GotoUnless(assembler->UintPtrLessThan(offset, limit),
&done_loop);
// Load the value from {source}.
Node* value = assembler->Load(MachineType::AnyTagged(), source, offset);
// Store the {value} to the {target} without a write barrier, since we
// know that the {target} is allocated in new space.
assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, target,
offset, value);
// Increment {offset} and continue.
var_offset.Bind(assembler->IntPtrAdd(
offset, assembler->IntPtrConstant(kPointerSize)));
assembler->Goto(&loop);
}
assembler->Bind(&done_loop);
assembler->Return(target);
}
assembler->Bind(&if_oldspace);
{
// Allocate the target FixedArray in old space
// (maybe even in large object space).
Node* flags = assembler->SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
Node* source_size_tagged = assembler->SmiFromWord(source_size);
Node* target = assembler->CallRuntime(Runtime::kAllocateInTargetSpace,
assembler->NoContextConstant(),
source_size_tagged, flags);
assembler->StoreMapNoWriteBarrier(
target, assembler->LoadRoot(Heap::kFixedArrayMapRootIndex));
assembler->StoreObjectFieldNoWriteBarrier(target, FixedArray::kLengthOffset,
source_length_tagged);
// Compute the limit.
Node* limit = assembler->IntPtrSub(
source_size, assembler->IntPtrConstant(kHeapObjectTag));
// Copy the {source} to the {target}.
Variable var_offset(assembler, MachineType::PointerRepresentation());
Label loop(assembler, &var_offset), done_loop(assembler);
var_offset.Bind(
assembler->IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Determine the current {offset}.
Node* offset = var_offset.value();
// Check if we are done.
assembler->GotoUnless(assembler->UintPtrLessThan(offset, limit),
&done_loop);
// Load the value from {source}.
Node* value = assembler->Load(MachineType::AnyTagged(), source, offset);
// Store the {value} to the {target} with a proper write barrier.
assembler->Store(MachineRepresentation::kTagged, target, offset, value);
// Increment {offset} and continue.
var_offset.Bind(assembler->IntPtrAdd(
offset, assembler->IntPtrConstant(kPointerSize)));
assembler->Goto(&loop);
}
assembler->Bind(&done_loop);
assembler->Return(target);
}
}
} // namespace internal
} // namespace v8
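
The builtin's size computation is source_size = (source_length << kPointerSizeLog2) + FixedArray::kHeaderSize, and arrays at least Page::kMaxRegularHeapObjectSize bytes large are routed to the old-space (runtime) allocation path. Below is a plain-C++ mirror of that arithmetic; the constants are illustrative assumptions, since the real values depend on platform and build:

#include <cstdint>
#include <cstdio>

// Illustrative constants (assumptions, not authoritative V8 values).
constexpr intptr_t kPointerSizeLog2 = 3;                    // 8-byte slots (64-bit)
constexpr intptr_t kFixedArrayHeaderSize = 16;              // map word + length word
constexpr intptr_t kMaxRegularHeapObjectSize = 512 * 1024;  // example threshold

// Mirrors the builtin: header plus one pointer-sized slot per element.
intptr_t SourceSizeInBytes(intptr_t length) {
  return (length << kPointerSizeLog2) + kFixedArrayHeaderSize;
}

int main() {
  const intptr_t lengths[] = {0, 16, 100000};
  for (intptr_t length : lengths) {
    intptr_t size = SourceSizeInBytes(length);
    // The builtin performs this comparison unsigned (UintPtrLessThan).
    const char* space = static_cast<uintptr_t>(size) <
                                static_cast<uintptr_t>(kMaxRegularHeapObjectSize)
                            ? "new space"
                            : "old/large object space";
    std::printf("length=%jd -> %jd bytes -> %s\n", static_cast<intmax_t>(length),
                static_cast<intmax_t>(size), space);
  }
  return 0;
}

The new-space path can use a raw bump allocation plus unbarriered stores, which is why the two copy loops differ only in the store used.
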
@@ -159,6 +159,9 @@ namespace internal {
ASM(AllocateInNewSpace) \
ASM(AllocateInOldSpace) \
\
/* FixedArray helpers */ \
TFS(CopyFixedArray, BUILTIN, kNoExtraICState, CopyFixedArray) \
\
/* Debugger */ \
DBG(FrameDropper_LiveEdit) \
DBG(Return_DebugBreak) \
......
@@ -509,6 +509,12 @@ Callable CodeFactory::FastNewStrictArguments(Isolate* isolate,
return make_callable(stub);
}
// static
Callable CodeFactory::CopyFixedArray(Isolate* isolate) {
return Callable(isolate->builtins()->CopyFixedArray(),
CopyFixedArrayDescriptor(isolate));
}
// static
Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
AllocateHeapNumberStub stub(isolate);
......
@@ -136,6 +136,8 @@ class CodeFactory final {
static Callable FastNewStrictArguments(Isolate* isolate,
bool skip_stub_frame = false);
static Callable CopyFixedArray(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
static Callable Allocate##Type(Isolate* isolate);
......
@@ -723,6 +723,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kPlainPrimitiveToFloat64:
state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
break;
case IrOpcode::kEnsureWritableFastElements:
state = LowerEnsureWritableFastElements(node, *effect, *control);
break;
case IrOpcode::kTransitionElementsKind:
state = LowerTransitionElementsKind(node, *effect, *control);
break;
@@ -2599,6 +2602,59 @@ EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
return ValueEffectControl(value, effect, control);
}
EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
Node* effect,
Node* control) {
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
// Load the current map of {elements}.
Node* elements_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
elements, effect, control);
// Check if {elements} is not a copy-on-write FixedArray.
Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
jsgraph()->FixedArrayMapConstant());
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
// Nothing to do if the {elements} are not copy-on-write.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
Node* vtrue = elements;
// We need to take a copy of the {elements} and set them up for {object}.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
{
// We need to create a copy of the {elements} for {object}.
Operator::Properties properties = Operator::kEliminatable;
Callable callable = CodeFactory::CopyFixedArray(isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
properties);
vfalse = efalse = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
elements, jsgraph()->NoContextConstant(), efalse);
// Store the new {elements} into {object}.
efalse = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForJSObjectElements()), object,
vfalse, efalse, if_false);
}
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
Node* value = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
return ValueEffectControl(value, effect, control);
}
EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
Node* control) {
......
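
LowerEnsureWritableFastElements builds a branch diamond: WordEqual compares the elements map against the FixedArray map with BranchHint::kTrue (the non-COW case is expected), the true path passes {elements} through unchanged, and the false path calls the CopyFixedArray stub and stores the result back into {object}; value, effect, and control then rejoin at the Phi/EffectPhi/Merge nodes. The same shape as straight-line C++, with toy types rather than TurboFan's API:

// Toy types; illustration only, not TurboFan's Node/graph API.
struct Elements {
  bool is_cow;
};
struct TaggedObject {
  Elements* elements;
};

// Stands in for the call to the CopyFixedArray builtin on the false path.
Elements* CopyFixedArrayStub(const Elements& source) {
  Elements* copy = new Elements(source);  // leaked here; the GC owns it in V8
  copy->is_cow = false;
  return copy;
}

// The diamond as straight-line code: the condition is the WordEqual map
// check, the early return is the if_true/vtrue path, the copy-and-store-back
// block is the if_false/vfalse path, and the single return value plays the
// role of the Phi at the Merge.
Elements* EnsureWritableFastElements(TaggedObject* object) {
  Elements* elements = object->elements;
  if (!elements->is_cow) {                         // BranchHint::kTrue
    return elements;                               // vtrue: pass through
  }
  Elements* copy = CopyFixedArrayStub(*elements);  // vfalse: take a copy
  object->elements = copy;                         // StoreField(elements)
  return copy;
}

int main() {
  Elements cow{true};
  TaggedObject object{&cow};
  return EnsureWritableFastElements(&object)->is_cow ? 1 : 0;  // expect 0
}
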
@@ -136,6 +136,8 @@ class EffectControlLinearizer {
Node* control);
ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
......
@@ -52,6 +52,11 @@ Node* JSGraph::EmptyStringConstant() {
return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
}
Node* JSGraph::FixedArrayMapConstant() {
return CACHED(kFixedArrayMapConstant,
HeapConstant(factory()->fixed_array_map()));
}
Node* JSGraph::HeapNumberMapConstant() {
return CACHED(kHeapNumberMapConstant,
HeapConstant(factory()->heap_number_map()));
......
@@ -46,6 +46,7 @@ class JSGraph : public ZoneObject {
Node* EmptyFixedArrayConstant();
Node* EmptyLiteralsArrayConstant();
Node* EmptyStringConstant();
Node* FixedArrayMapConstant();
Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
Node* StaleRegisterConstant();
@@ -156,6 +157,7 @@ class JSGraph : public ZoneObject {
kEmptyFixedArrayConstant,
kEmptyLiteralsArrayConstant,
kEmptyStringConstant,
kFixedArrayMapConstant,
kHeapNumberMapConstant,
kOptimizedOutConstant,
kStaleRegisterConstant,
......
@@ -428,6 +428,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// TODO(bmeurer): Add support for non-standard stores.
if (store_mode != STANDARD_STORE &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW &&
store_mode != STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
return NoChange();
}
@@ -960,10 +961,11 @@ JSNativeContextSpecialization::BuildElementAccess(
  // Don't try to store to a copy-on-write backing store.
  if (access_mode == AccessMode::kStore &&
-     IsFastSmiOrObjectElementsKind(elements_kind)) {
-   effect = graph()->NewNode(
-       simplified()->CheckMaps(1), elements,
-       jsgraph()->HeapConstant(factory()->fixed_array_map()), effect, control);
+     IsFastSmiOrObjectElementsKind(elements_kind) &&
+     store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+   effect =
+       graph()->NewNode(simplified()->CheckMaps(1), elements,
+                        jsgraph()->FixedArrayMapConstant(), effect, control);
  }
if (IsFixedTypedArrayElementsKind(elements_kind)) {
@@ -1067,7 +1069,8 @@ JSNativeContextSpecialization::BuildElementAccess(
}
} else {
// TODO(turbofan): Add support for additional store modes.
-     DCHECK_EQ(STANDARD_STORE, store_mode);
+     DCHECK(store_mode == STANDARD_STORE ||
+            store_mode == STORE_NO_TRANSITION_HANDLE_COW);
// Load the length of the {receiver}.
Node* length = effect =
@@ -1147,6 +1150,16 @@ JSNativeContextSpecialization::BuildElementAccess(
// Make sure we do not store signalling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
}
// Ensure that copy-on-write backing store is writable.
if (IsFastSmiOrObjectElementsKind(elements_kind) &&
store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
}
// Perform the actual element access.
effect = graph()->NewNode(simplified()->StoreElement(element_access),
elements, index, value, effect, control);
}
......
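
Note how the two store paths complement each other: the CheckMaps guard against the (non-COW) FixedArray map is emitted only when the store mode does not handle COW, so a COW store deoptimizes, while EnsureWritableFastElements is emitted only when it does. A condensed decision table, with stand-in enums for illustration (COW backing stores exist only for smi/object elements; double arrays use a FixedDoubleArray, which is never copy-on-write):

// Stand-in enums for illustration; names mirror V8's but values are invented.
enum StoreMode {
  STANDARD_STORE,
  STORE_NO_TRANSITION_HANDLE_COW,
  STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS
};
enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
  return kind == FAST_SMI_ELEMENTS || kind == FAST_ELEMENTS;
}

// CheckMaps(FixedArrayMap) path: COW stores are rejected by deoptimization.
bool EmitsNonCowMapCheck(ElementsKind kind, StoreMode mode) {
  return IsFastSmiOrObjectElementsKind(kind) &&
         mode != STORE_NO_TRANSITION_HANDLE_COW;
}

// EnsureWritableFastElements path: COW stores copy the backing store first.
bool EmitsEnsureWritableFastElements(ElementsKind kind, StoreMode mode) {
  return IsFastSmiOrObjectElementsKind(kind) &&
         mode == STORE_NO_TRANSITION_HANDLE_COW;
}

int main() {
  bool copies = EmitsEnsureWritableFastElements(FAST_ELEMENTS,
                                                STORE_NO_TRANSITION_HANDLE_COW);
  bool checks = EmitsNonCowMapCheck(FAST_ELEMENTS, STANDARD_STORE);
  return (copies && checks) ? 0 : 1;  // expect 0: exactly one path per mode
}
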
@@ -4,6 +4,7 @@
#include "src/compiler/load-elimination.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -53,6 +54,8 @@ Reduction LoadElimination::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
case IrOpcode::kEnsureWritableFastElements:
return ReduceEnsureWritableFastElements(node);
case IrOpcode::kTransitionElementsKind:
return ReduceTransitionElementsKind(node);
case IrOpcode::kLoadField:
@@ -326,6 +329,29 @@ Reduction LoadElimination::ReduceCheckMaps(Node* node) {
return UpdateState(node, state);
}
Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const elements = NodeProperties::GetValueInput(node, 1);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
if (Node* const elements_map = state->LookupField(elements, 0)) {
// Check if the {elements} already have the fixed array map.
if (elements_map == fixed_array_map) {
ReplaceWithValue(node, elements, effect);
return Replace(elements);
}
}
// We know that the resulting elements have the fixed array map.
state = state->AddField(node, 0, fixed_array_map, zone());
// Kill the previous elements on {object}.
state = state->KillField(object, 2, zone());
// Add the new elements on {object}.
state = state->AddField(object, 2, node, zone());
return UpdateState(node, state);
}
Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const source_map = NodeProperties::GetValueInput(node, 1);
@@ -552,6 +578,19 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
visited.insert(current);
if (!current->op()->HasProperty(Operator::kNoWrite)) {
switch (current->opcode()) {
case IrOpcode::kEnsureWritableFastElements: {
Node* const object = NodeProperties::GetValueInput(current, 0);
Node* const elements = NodeProperties::GetValueInput(current, 1);
state = state->KillField(elements, 0, zone());
state = state->KillField(object, 2, zone());
break;
}
case IrOpcode::kTransitionElementsKind: {
Node* const object = NodeProperties::GetValueInput(current, 0);
state = state->KillField(object, 0, zone());
state = state->KillField(object, 2, zone());
break;
}
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
@@ -566,6 +605,11 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
state = state->KillElement(object, index, zone());
break;
}
case IrOpcode::kStoreBuffer:
case IrOpcode::kStoreTypedElement: {
// Doesn't affect anything we track with the state currently.
break;
}
default:
return empty_state();
}
......
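
In LoadElimination's abstract state, field index 0 tracks an object's map and field index 2 the JSObject elements field (field offsets divided by the pointer size). A toy model of the two state updates above, heavily simplified to a flat map keyed by node and field index:

#include <map>
#include <utility>

using NodeId = int;  // toy node identity
using AbstractState = std::map<std::pair<NodeId, int>, NodeId>;

constexpr int kMapField = 0;       // field index 0: the map word
constexpr int kElementsField = 2;  // field index 2: JSObject::elements

// Mirrors ReduceEnsureWritableFastElements: the node's result is known to
// carry the FixedArray map, and it becomes {object}'s elements field.
void OnEnsureWritableFastElements(AbstractState& state, NodeId node,
                                  NodeId object, NodeId fixed_array_map) {
  state[{node, kMapField}] = fixed_array_map;  // AddField(node, 0, map)
  state.erase({object, kElementsField});       // KillField(object, 2)
  state[{object, kElementsField}] = node;      // AddField(object, 2, node)
}

// Mirrors the ComputeLoopState case: inside a loop only the kills are safe.
void OnLoopEffect(AbstractState& state, NodeId object, NodeId elements) {
  state.erase({elements, kMapField});     // KillField(elements, 0)
  state.erase({object, kElementsField});  // KillField(object, 2)
}

int main() {
  AbstractState state;
  OnEnsureWritableFastElements(state, /*node=*/7, /*object=*/1,
                               /*fixed_array_map=*/3);
  return state.at({1, kElementsField}) == 7 ? 0 : 1;  // expect 0
}
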
@@ -13,11 +13,12 @@ namespace compiler {
// Forward declarations.
struct FieldAccess;
class JSGraph;
class LoadElimination final : public AdvancedReducer {
public:
- LoadElimination(Editor* editor, Zone* zone)
-     : AdvancedReducer(editor), node_states_(zone) {}
+ LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+     : AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
~LoadElimination() final {}
Reduction Reduce(Node* node) final;
@@ -150,6 +151,7 @@ class LoadElimination final : public AdvancedReducer {
};
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceEnsureWritableFastElements(Node* node);
Reduction ReduceTransitionElementsKind(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
@@ -168,10 +170,12 @@ class LoadElimination final : public AdvancedReducer {
static int FieldIndexOf(FieldAccess const& access);
AbstractState const* empty_state() const { return &empty_state_; }
JSGraph* jsgraph() const { return jsgraph_; }
Zone* zone() const { return node_states_.zone(); }
AbstractState const empty_state_;
AbstractStateForEffectNodes node_states_;
JSGraph* const jsgraph_;
DISALLOW_COPY_AND_ASSIGN(LoadElimination);
};
......
@@ -305,6 +305,7 @@
V(ObjectIsSmi) \
V(ObjectIsString) \
V(ObjectIsUndetectable) \
V(EnsureWritableFastElements) \
V(TransitionElementsKind)
#define SIMPLIFIED_OP_LIST(V) \
......
@@ -615,9 +615,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
}
- // TODO(mstarzinger): Hack to ensure that the ToNumber call descriptor is
+ // TODO(mstarzinger): Hack to ensure that certain call descriptors are
  // initialized on the main thread, since it is needed off-thread by the
  // effect control linearizer.
+ CodeFactory::CopyFixedArray(info()->isolate());
  CodeFactory::ToNumber(info()->isolate());
linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
@@ -1074,7 +1075,8 @@ struct LoadEliminationPhase {
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
-   LoadElimination load_elimination(&graph_reducer, temp_zone);
+   LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+                                    temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
......
@@ -2386,6 +2386,9 @@ class RepresentationSelector {
VisitInputs(node);
return SetOutput(node, MachineRepresentation::kNone);
}
case IrOpcode::kEnsureWritableFastElements:
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTagged);
//------------------------------------------------------------------
// Machine-level operators.
......
@@ -503,6 +503,16 @@ struct SimplifiedOperatorGlobalCache final {
AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
AllocateOperator<TENURED> kAllocateTenuredOperator;
struct EnsureWritableFastElementsOperator final : public Operator {
EnsureWritableFastElementsOperator()
: Operator( // --
IrOpcode::kEnsureWritableFastElements, // opcode
Operator::kNoDeopt | Operator::kNoThrow, // flags
"EnsureWritableFastElements", // name
2, 1, 1, 1, 1, 0) {} // counts
};
EnsureWritableFastElementsOperator kEnsureWritableFastElements;
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
@@ -618,6 +628,10 @@ const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
"ReferenceEqual", 2, 0, 0, 1, 0, 0);
}
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
return &cache_.kEnsureWritableFastElements;
}
const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
ElementsTransition transition) {
return new (zone()) Operator1<ElementsTransition>( // --
......
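
The six trailing counts in the Operator constructor describe the operator's signature. Going by the order TurboFan uses (value, effect, and control inputs, then value, effect, and control outputs), 2, 1, 1, 1, 1, 0 decodes as below; the struct is only a reading aid, not a V8 type:

#include <cstdio>

// Decodes "2, 1, 1, 1, 1, 0" for EnsureWritableFastElements.
struct OperatorSignature {
  int value_inputs;     // 2: {object} and {elements}
  int effect_inputs;    // 1: ordered on the effect chain
  int control_inputs;   // 1: attached to a control position
  int value_outputs;    // 1: the writable elements array
  int effect_outputs;   // 1: continues the effect chain
  int control_outputs;  // 0: no branching; control passes through
};

int main() {
  constexpr OperatorSignature kEnsureWritableFastElements{2, 1, 1, 1, 1, 0};
  std::printf("value in/out: %d/%d, effect in/out: %d/%d, control in/out: %d/%d\n",
              kEnsureWritableFastElements.value_inputs,
              kEnsureWritableFastElements.value_outputs,
              kEnsureWritableFastElements.effect_inputs,
              kEnsureWritableFastElements.effect_outputs,
              kEnsureWritableFastElements.control_inputs,
              kEnsureWritableFastElements.control_outputs);
  return 0;
}

The kNoDeopt | kNoThrow flags are what make the node eligible for the effect-control linearizer's wiring, while still allowing redundant occurrences to be eliminated by load elimination.
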
@@ -148,6 +148,8 @@ enum class ElementsTransition : uint8_t {
kSlowTransition // full transition, round-trip to the runtime.
};
size_t hash_value(ElementsTransition);
std::ostream& operator<<(std::ostream&, ElementsTransition);
ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -311,6 +313,9 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
// ensure-writable-fast-elements object, elements
const Operator* EnsureWritableFastElements();
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
......
@@ -722,6 +722,10 @@ Type* Typer::Visitor::TypeLoopExitEffect(Node* node) {
return nullptr;
}
Type* Typer::Visitor::TypeEnsureWritableFastElements(Node* node) {
return Operand(node, 1);
}
Type* Typer::Visitor::TypeTransitionElementsKind(Node* node) {
UNREACHABLE();
return nullptr;
......
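
The typing rule forwards the type of value input 1 (the incoming elements): the operator yields either that very array or a fresh copy with identical contents, so no more precise type is available. A toy rendering with stand-in Type/Node types:

#include <cassert>

struct Type {};                    // toy type object
struct Node { Type* inputs[2]; };  // toy node with two value inputs

// Operand(node, 1): the result type is the type of the {elements} input.
Type* TypeEnsureWritableFastElements(const Node& node) {
  return node.inputs[1];
}

int main() {
  Type internal;                    // e.g. Type::Internal()
  Node node{{nullptr, &internal}};
  assert(TypeEnsureWritableFastElements(node) == &internal);
  return 0;
}
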
@@ -851,6 +851,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::PlainNumber());
CheckUpperIs(node, Type::TaggedPointer());
break;
case IrOpcode::kEnsureWritableFastElements:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Internal());
CheckUpperIs(node, Type::Internal());
break;
case IrOpcode::kTransitionElementsKind:
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Internal());
......
@@ -55,6 +55,7 @@ class PlatformInterfaceDescriptor;
V(ConstructStub) \
V(ConstructTrampoline) \
V(RegExpConstructResult) \
V(CopyFixedArray) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(AllocateFloat32x4) \
@@ -648,6 +649,12 @@ class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
static const Register ValueRegister();
};
class CopyFixedArrayDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kSource)
DECLARE_DEFAULT_DESCRIPTOR(CopyFixedArrayDescriptor, CallInterfaceDescriptor,
kParameterCount)
};
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
......
@@ -4,6 +4,7 @@
#include "src/compiler/load-elimination.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
@@ -20,14 +21,20 @@ namespace compiler {
class LoadEliminationTest : public TypedGraphTest {
public:
- LoadEliminationTest() : TypedGraphTest(3), simplified_(zone()) {}
+ LoadEliminationTest()
+     : TypedGraphTest(3),
+       simplified_(zone()),
+       jsgraph_(isolate(), graph(), common(), nullptr, simplified(), nullptr) {
+ }
~LoadEliminationTest() override {}
protected:
JSGraph* jsgraph() { return &jsgraph_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
private:
SimplifiedOperatorBuilder simplified_;
JSGraph jsgraph_;
};
TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
@@ -39,7 +46,7 @@ TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
@@ -65,7 +72,7 @@ TEST_F(LoadEliminationTest, StoreElementAndLoadElement) {
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
@@ -92,7 +99,7 @@ TEST_F(LoadEliminationTest, StoreElementAndStoreFieldAndLoadElement) {
MachineType::AnyTagged(), kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
@@ -126,7 +133,7 @@ TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
@@ -155,7 +162,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
@@ -185,7 +192,7 @@ TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
- LoadElimination load_elimination(&editor, zone());
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
load_elimination.Reduce(graph()->start());
......