Commit c0c214da authored by rmcilroy, committed by Commit bot

[Interpreter] Add support for loading from / storing to outer context variables.

Adds support for loading from and storing to outer context
variables. Also adds support for declaring functions on contexts and
locals. Finally, fixes a couple of issues with StaContextSlot where
we weren't emitting the write barrier, which could cause crashes
during GC.

Also makes --print-bytecode output the function name before the
bytecodes, replaces MachineType with StoreRepresentation in
RawMachineAssembler::Store, and updates the tests accordingly.

BUG=v8:4280
LOG=N

Review URL: https://codereview.chromium.org/1425633002

Cr-Commit-Position: refs/heads/master@{#31584}
parent 2a4f5a64
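
The core of the change: loads and stores that target a variable in an outer
function's context now emit an explicit walk of the context chain in
bytecode. A minimal sketch of the builder sequence, assuming a
BytecodeArrayBuilder* builder plus a scratch Register context_reg and a
precomputed depth (names simplified from the hunks below):

  // Start from the current function context.
  builder->LoadAccumulatorWithRegister(execution_context_reg)
      .StoreAccumulatorInRegister(context_reg);
  // Follow Context::PREVIOUS_INDEX once per level of nesting.
  for (int i = 0; i < depth; ++i) {
    builder->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
        .StoreAccumulatorInRegister(context_reg);
  }
  // context_reg now holds the target context; access the slot itself.
  builder->LoadContextSlot(context_reg, variable_index);
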
......@@ -125,8 +125,9 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return raw_assembler_->Store(kMachAnyTagged, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index), value);
return raw_assembler_->Store(
StoreRepresentation(kMachAnyTagged, kNoWriteBarrier),
RegisterFileRawPointer(), RegisterFrameOffset(reg_index), value);
}
......@@ -316,7 +317,9 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
return raw_assembler_->Store(kMachAnyTagged, context, offset, value);
return raw_assembler_->Store(
StoreRepresentation(kMachAnyTagged, kFullWriteBarrier), context, offset,
value);
}
......
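On the barrier fix above: the interpreter's register file lives in the stack
frame, so StoreRegister can keep kNoWriteBarrier, while a Context is a heap
object, so the store behind StaContextSlot needs kFullWriteBarrier or the GC
can miss the newly written reference. The two store representations now used
in interpreter-assembler.cc, side by side (condensed from the hunks above):

  // Register file: off-heap interpreter state, no barrier required.
  raw_assembler_->Store(StoreRepresentation(kMachAnyTagged, kNoWriteBarrier),
                        RegisterFileRawPointer(),
                        RegisterFrameOffset(reg_index), value);
  // Context slot: store into a heap object, full write barrier.
  raw_assembler_->Store(StoreRepresentation(kMachAnyTagged, kFullWriteBarrier),
                        context, offset, value);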
......@@ -126,12 +126,12 @@ class RawMachineAssembler {
return AddNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
Node* Store(MachineType rep, Node* base, Node* value) {
Node* Store(StoreRepresentation rep, Node* base, Node* value) {
return Store(rep, base, IntPtrConstant(0), value);
}
Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
return AddNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
base, index, value, graph()->start(), graph()->start());
Node* Store(StoreRepresentation rep, Node* base, Node* index, Node* value) {
return AddNode(machine()->Store(rep), base, index, value, graph()->start(),
graph()->start());
}
// Arithmetic Operations.
......@@ -486,7 +486,7 @@ class RawMachineAssembler {
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
Node* StoreToPointer(void* address, MachineType rep, Node* node) {
Node* StoreToPointer(void* address, StoreRepresentation rep, Node* node) {
return Store(rep, PointerConstant(address), node);
}
Node* StringConstant(const char* string) {
......
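Call sites of RawMachineAssembler::Store change mechanically; for example
(illustrative operands):

  // Before: the write-barrier kind was implicit (always kNoWriteBarrier).
  m.Store(kMachInt32, base, index, value);
  // After: callers spell out the barrier kind alongside the machine type.
  m.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier), base, index, value);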
......@@ -43,10 +43,14 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
generator_->set_execution_context(outer_);
}
// Returns the execution context for the given |scope| if it is a function
// local execution context, otherwise returns nullptr.
ContextScope* Previous(Scope* scope) {
int depth = scope_->ContextChainLength(scope);
// Returns the depth of the given |scope| for the current execution context.
int ContextChainDepth(Scope* scope) {
return scope_->ContextChainLength(scope);
}
// Returns the execution context at |depth| in the current context chain if it
// is a function local execution context, otherwise returns nullptr.
ContextScope* Previous(int depth) {
if (depth > depth_) {
return nullptr;
}
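The old Previous(Scope*) both computed the depth and looked up the context
scope; splitting the two lets the generator fall back to an explicit chain
walk when the target context is not one of its tracked function-local
scopes. The intended call pattern, mirroring VisitVariableLoad below:

  int depth = execution_context()->ContextChainDepth(variable->scope());
  ContextScope* context = execution_context()->Previous(depth);
  if (context != nullptr) {
    // Context is tracked locally; use its register directly.
    context_reg = context->reg();
  } else {
    // Otherwise emit bytecode that follows PREVIOUS_INDEX |depth| times.
  }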
......@@ -414,8 +418,18 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
break;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
break;
}
case VariableLocation::CONTEXT: {
DCHECK_EQ(0, execution_context()->ContextChainDepth(variable->scope()));
VisitForAccumulatorValue(decl->fun());
builder()->StoreContextSlot(execution_context()->reg(),
variable->index());
break;
}
case VariableLocation::LOOKUP:
UNIMPLEMENTED();
}
......@@ -981,13 +995,27 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
break;
}
case VariableLocation::CONTEXT: {
ContextScope* context = execution_context()->Previous(variable->scope());
int depth = execution_context()->ContextChainDepth(variable->scope());
ContextScope* context = execution_context()->Previous(depth);
Register context_reg;
if (context) {
builder()->LoadContextSlot(context->reg(), variable->index());
execution_result()->SetResultInAccumulator();
context_reg = context->reg();
} else {
UNIMPLEMENTED();
context_reg = execution_result()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
builder()
->LoadAccumulatorWithRegister(execution_context()->reg())
.StoreAccumulatorInRegister(context_reg);
for (int i = 0; i < depth; ++i) {
builder()
->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
.StoreAccumulatorInRegister(context_reg);
}
}
builder()->LoadContextSlot(context_reg, variable->index());
execution_result()->SetResultInAccumulator();
// TODO(rmcilroy): Perform check for uninitialized legacy const, const and
// let variables.
break;
......@@ -1039,12 +1067,29 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
}
case VariableLocation::CONTEXT: {
// TODO(rmcilroy): support const mode initialization.
ContextScope* context = execution_context()->Previous(variable->scope());
int depth = execution_context()->ContextChainDepth(variable->scope());
ContextScope* context = execution_context()->Previous(depth);
Register context_reg;
if (context) {
builder()->StoreContextSlot(context->reg(), variable->index());
context_reg = context->reg();
} else {
UNIMPLEMENTED();
Register value_temp = execution_result()->NewRegister();
context_reg = execution_result()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
builder()
->StoreAccumulatorInRegister(value_temp)
.LoadAccumulatorWithRegister(execution_context()->reg())
.StoreAccumulatorInRegister(context_reg);
for (int i = 0; i < depth; ++i) {
builder()
->LoadContextSlot(context_reg, Context::PREVIOUS_INDEX)
.StoreAccumulatorInRegister(context_reg);
}
builder()->LoadAccumulatorWithRegister(value_temp);
}
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
case VariableLocation::LOOKUP:
......
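In the assignment path the accumulator already holds the value being stored
and the chain walk itself goes through the accumulator, so the value is
parked in a temporary register first and reloaded just before the final
StaContextSlot. Roughly (register names from the hunk above):

  builder()->StoreAccumulatorInRegister(value_temp);   // save the RHS value
  // ... walk the context chain, clobbering the accumulator ...
  builder()->LoadAccumulatorWithRegister(value_temp);  // restore the RHS value
  builder()->StoreContextSlot(context_reg, variable->index());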
......@@ -63,7 +63,10 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
if (FLAG_print_bytecode) {
bytecodes->Print();
OFStream os(stdout);
os << "Function: " << info->GetDebugName().get() << std::endl;
bytecodes->Print(os);
os << std::flush;
}
info->SetBytecodeArray(bytecodes);
......
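The practical effect is that each --print-bytecode dump now starts with a
header naming the function; for a function named f the output begins with
(the bytecode listing itself is unchanged):

  Function: f
  <BytecodeArray dump as before>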
......@@ -36,6 +36,12 @@ inline MachineType MachineTypeForC() {
return kMachAnyTagged;
}
template <typename T>
inline StoreRepresentation StoreRepresentationForC(
WriteBarrierKind write_barrier_kind) {
return StoreRepresentation(MachineTypeForC<T>(), write_barrier_kind);
}
#define DECLARE_TEMPLATE_SPECIALIZATION(ctype, mtype) \
template <> \
inline MachineType MachineTypeForC<ctype>() { \
......
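StoreRepresentationForC<T> mirrors the existing MachineTypeForC<T> helper so
typed call sites can build a StoreRepresentation in one step. An illustrative
use (equivalent to passing
StoreRepresentation(MachineTypeForC<int32_t>(), kNoWriteBarrier)):

  raw.Store(StoreRepresentationForC<int32_t>(kNoWriteBarrier), base, offset,
            value);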
......@@ -110,7 +110,8 @@ class BinopTester {
void AddReturn(Node* val) {
if (use_result_buffer) {
T->Store(rep, T->PointerConstant(&result), T->Int32Constant(0), val);
T->Store(StoreRepresentation(rep, kNoWriteBarrier),
T->PointerConstant(&result), T->Int32Constant(0), val);
T->Return(T->Int32Constant(CHECK_VALUE));
} else {
T->Return(val);
......
......@@ -19,6 +19,12 @@ using namespace v8::internal::compiler;
typedef RawMachineAssembler::Label MLabel;
StoreRepresentation StoreRepForType(MachineType type) {
return StoreRepresentation(type, kNoWriteBarrier);
}
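StoreRepForType is just a test-local shorthand that fixes the barrier to
kNoWriteBarrier, keeping the many Store() call sites below on one line, e.g.:

  // Same as StoreRepresentation(kMachFloat64, kNoWriteBarrier).
  m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&buffer),
          m.Int32Constant(0), phi);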
TEST(RunInt32Add) {
RawMachineAssemblerTester<int32_t> m;
Node* add = m.Int32Add(m.Int32Constant(0), m.Int32Constant(1));
......@@ -798,8 +804,8 @@ TEST(RunLoadStoreFloat32Offset) {
// generate load [#base + #index]
Node* load =
m.Load(kMachFloat32, m.PointerConstant(from), m.IntPtrConstant(offset));
m.Store(kMachFloat32, m.PointerConstant(to), m.IntPtrConstant(offset),
load);
m.Store(StoreRepresentation(kMachFloat32, kNoWriteBarrier),
m.PointerConstant(to), m.IntPtrConstant(offset), load);
m.Return(m.Int32Constant(magic));
FOR_FLOAT32_INPUTS(j) {
......@@ -825,8 +831,8 @@ TEST(RunLoadStoreFloat64Offset) {
// generate load [#base + #index]
Node* load =
m.Load(kMachFloat64, m.PointerConstant(from), m.IntPtrConstant(offset));
m.Store(kMachFloat64, m.PointerConstant(to), m.IntPtrConstant(offset),
load);
m.Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
m.PointerConstant(to), m.IntPtrConstant(offset), load);
m.Return(m.Int32Constant(magic));
FOR_FLOAT64_INPUTS(j) {
......@@ -3207,7 +3213,8 @@ static void RunLoadStore(MachineType rep) {
Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
Node* load = m.Load(rep, base, index0);
Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
m.Store(rep, base, index1, load);
StoreRepresentation store_rep(rep, kNoWriteBarrier);
m.Store(StoreRepForType(rep), base, index1, load);
m.Return(m.Int32Constant(OK));
CHECK(buffer[x] != buffer[y]);
......@@ -3258,7 +3265,7 @@ TEST(RunFloat32Binop) {
Node* binop = m.AddNode(ops[i], a, b);
Node* base = m.PointerConstant(&result);
Node* zero = m.IntPtrConstant(0);
m.Store(kMachFloat32, base, zero, binop);
m.Store(StoreRepForType(kMachFloat32), base, zero, binop);
m.Return(m.Int32Constant(i + j));
CHECK_EQ(i + j, m.Call());
}
......@@ -3294,7 +3301,7 @@ TEST(RunFloat64Binop) {
Node* binop = m.AddNode(ops[i], a, b);
Node* base = m.PointerConstant(&result);
Node* zero = m.Int32Constant(0);
m.Store(kMachFloat64, base, zero, binop);
m.Store(StoreRepForType(kMachFloat64), base, zero, binop);
m.Return(m.Int32Constant(i + j));
CHECK_EQ(i + j, m.Call());
}
......@@ -3389,7 +3396,7 @@ TEST(RunFloat32SubImm1) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat32);
Node* t1 = m.Float32Sub(m.Float32Constant(*i), t0);
m.StoreToPointer(&output, kMachFloat32, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat32), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT32_INPUTS(j) {
input = *j;
......@@ -3409,7 +3416,7 @@ TEST(RunFloat32SubImm2) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat32);
Node* t1 = m.Float32Sub(t0, m.Float32Constant(*i));
m.StoreToPointer(&output, kMachFloat32, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat32), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT32_INPUTS(j) {
input = *j;
......@@ -3444,7 +3451,7 @@ TEST(RunFloat64SubImm1) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0);
m.StoreToPointer(&output, kMachFloat64, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat64), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
......@@ -3464,7 +3471,7 @@ TEST(RunFloat64SubImm2) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i));
m.StoreToPointer(&output, kMachFloat64, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat64), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
......@@ -3517,7 +3524,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
m.StoreToPointer(&output, kMachFloat64,
m.StoreToPointer(&output, StoreRepForType(kMachFloat64),
m.Float64Add(m.Float64Mul(a, b), c));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
......@@ -3539,7 +3546,7 @@ TEST(RunFloat64MulAndFloat64AddP) {
Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
m.StoreToPointer(&output, kMachFloat64,
m.StoreToPointer(&output, StoreRepForType(kMachFloat64),
m.Float64Add(a, m.Float64Mul(b, c)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
......@@ -3569,7 +3576,8 @@ TEST(RunFloat64MulAndFloat64SubP) {
Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
m.StoreToPointer(&output, kMachFloat64, m.Float64Sub(a, m.Float64Mul(b, c)));
m.StoreToPointer(&output, StoreRepForType(kMachFloat64),
m.Float64Sub(a, m.Float64Mul(b, c)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
......@@ -3597,7 +3605,7 @@ TEST(RunFloat64MulImm) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0);
m.StoreToPointer(&output, kMachFloat64, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat64), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
......@@ -3612,7 +3620,7 @@ TEST(RunFloat64MulImm) {
RawMachineAssemblerTester<int32_t> m;
Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i));
m.StoreToPointer(&output, kMachFloat64, t1);
m.StoreToPointer(&output, StoreRepForType(kMachFloat64), t1);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(j) {
input = *j;
......@@ -3677,8 +3685,8 @@ TEST(RunChangeInt32ToFloat64_A) {
double result = 0;
Node* convert = m.ChangeInt32ToFloat64(m.Int32Constant(magic));
m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(0),
convert);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&result),
m.Int32Constant(0), convert);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -3691,8 +3699,8 @@ TEST(RunChangeInt32ToFloat64_B) {
double output = 0;
Node* convert = m.ChangeInt32ToFloat64(m.Parameter(0));
m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&output),
m.Int32Constant(0), convert);
m.Return(m.Parameter(0));
FOR_INT32_INPUTS(i) {
......@@ -3708,8 +3716,8 @@ TEST(RunChangeUint32ToFloat64_B) {
double output = 0;
Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&output),
m.Int32Constant(0), convert);
m.Return(m.Parameter(0));
FOR_UINT32_INPUTS(i) {
......@@ -3734,8 +3742,8 @@ TEST(RunChangeUint32ToFloat64_spilled) {
}
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(i * 8),
m.ChangeUint32ToFloat64(input_node[i]));
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&result),
m.Int32Constant(i * 8), m.ChangeUint32ToFloat64(input_node[i]));
}
m.Return(m.Int32Constant(magic));
......@@ -3758,8 +3766,8 @@ TEST(RunChangeFloat64ToInt32_A) {
double input = 11.1;
int32_t result = 0;
m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(0),
m.ChangeFloat64ToInt32(m.Float64Constant(input)));
m.Store(StoreRepForType(kMachInt32), m.PointerConstant(&result),
m.Int32Constant(0), m.ChangeFloat64ToInt32(m.Float64Constant(input)));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -3775,7 +3783,8 @@ TEST(RunChangeFloat64ToInt32_B) {
Node* load =
m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
Node* convert = m.ChangeFloat64ToInt32(load);
m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
m.Store(StoreRepForType(kMachInt32), m.PointerConstant(&output),
m.Int32Constant(0), convert);
m.Return(convert);
{
......@@ -3816,7 +3825,8 @@ TEST(RunChangeFloat64ToUint32_B) {
Node* load =
m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
Node* convert = m.ChangeFloat64ToUint32(load);
m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
m.Store(StoreRepForType(kMachInt32), m.PointerConstant(&output),
m.Int32Constant(0), convert);
m.Return(convert);
{
......@@ -3864,8 +3874,8 @@ TEST(RunChangeFloat64ToInt32_spilled) {
}
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(i * 4),
m.ChangeFloat64ToInt32(input_node[i]));
m.Store(StoreRepForType(kMachInt32), m.PointerConstant(&result),
m.Int32Constant(i * 4), m.ChangeFloat64ToInt32(input_node[i]));
}
m.Return(m.Int32Constant(magic));
......@@ -3896,8 +3906,8 @@ TEST(RunChangeFloat64ToUint32_spilled) {
}
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachUint32, m.PointerConstant(&result), m.Int32Constant(i * 4),
m.ChangeFloat64ToUint32(input_node[i]));
m.Store(StoreRepForType(kMachUint32), m.PointerConstant(&result),
m.Int32Constant(i * 4), m.ChangeFloat64ToUint32(input_node[i]));
}
m.Return(m.Int32Constant(magic));
......@@ -3936,8 +3946,8 @@ TEST(RunTruncateFloat64ToFloat32_spilled) {
}
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachFloat32, m.PointerConstant(&result), m.Int32Constant(i * 4),
m.TruncateFloat64ToFloat32(input_node[i]));
m.Store(StoreRepForType(kMachFloat32), m.PointerConstant(&result),
m.Int32Constant(i * 4), m.TruncateFloat64ToFloat32(input_node[i]));
}
m.Return(m.Int32Constant(magic));
......@@ -4012,7 +4022,8 @@ TEST(RunFloatDiamond) {
m.Goto(&end);
m.Bind(&end);
Node* phi = m.Phi(kMachFloat32, k2, k1);
m.Store(kMachFloat32, m.PointerConstant(&buffer), m.IntPtrConstant(0), phi);
m.Store(StoreRepForType(kMachFloat32), m.PointerConstant(&buffer),
m.IntPtrConstant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4037,7 +4048,8 @@ TEST(RunDoubleDiamond) {
m.Goto(&end);
m.Bind(&end);
Node* phi = m.Phi(kMachFloat64, k2, k1);
m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&buffer),
m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4063,7 +4075,8 @@ TEST(RunRefDiamond) {
m.Goto(&end);
m.Bind(&end);
Node* phi = m.Phi(kMachAnyTagged, k2, k1);
m.Store(kMachAnyTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Store(StoreRepForType(kMachAnyTagged), m.PointerConstant(&buffer),
m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4094,9 +4107,10 @@ TEST(RunDoubleRefDiamond) {
m.Bind(&end);
Node* dphi = m.Phi(kMachFloat64, d2, d1);
Node* rphi = m.Phi(kMachAnyTagged, r2, r1);
m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi);
m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
rphi);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&dbuffer),
m.Int32Constant(0), dphi);
m.Store(StoreRepForType(kMachAnyTagged), m.PointerConstant(&rbuffer),
m.Int32Constant(0), rphi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4138,9 +4152,10 @@ TEST(RunDoubleRefDoubleDiamond) {
Node* dphi2 = m.Phi(kMachFloat64, d1, dphi1);
Node* rphi2 = m.Phi(kMachAnyTagged, r1, rphi1);
m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi2);
m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
rphi2);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&dbuffer),
m.Int32Constant(0), dphi2);
m.Store(StoreRepForType(kMachAnyTagged), m.PointerConstant(&rbuffer),
m.Int32Constant(0), rphi2);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4168,7 +4183,8 @@ TEST(RunDoubleLoopPhi) {
m.Bind(&body);
m.Goto(&header);
m.Bind(&end);
m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&buffer),
m.Int32Constant(0), phi);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
......@@ -4462,7 +4478,7 @@ static void LoadStoreTruncation() {
RawMachineAssemblerTester<int32_t> m;
Node* a = m.LoadFromPointer(&input, kRepresentation);
Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
m.StoreToPointer(&input, kRepresentation, ap1);
m.StoreToPointer(&input, StoreRepForType(kRepresentation), ap1);
m.Return(ap1);
const IntType max = std::numeric_limits<IntType>::max();
......@@ -4565,7 +4581,7 @@ TEST(RunTestIntPtrArithmetic) {
Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
Node* elem_size = m.IntPtrConstant(sizeof(inputs[0]));
for (int i = 0; i < kInputSize; i++) {
m.Store(kMachInt32, output, m.Load(kMachInt32, input));
m.Store(StoreRepForType(kMachInt32), output, m.Load(kMachInt32, input));
input = m.IntPtrAdd(input, elem_size);
output = m.IntPtrSub(output, elem_size);
}
......@@ -4590,7 +4606,7 @@ TEST(RunSpillLotsOfThings) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
m.StoreToPointer(&outputs[i], StoreRepForType(kMachInt32), accs[i]);
}
m.Return(one);
m.Call();
......@@ -4616,7 +4632,7 @@ TEST(RunSpillConstantsAndParameters) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
m.StoreToPointer(&outputs[i], StoreRepForType(kMachInt32), accs[i]);
}
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
......@@ -4668,7 +4684,7 @@ TEST(RunInt32AddWithOverflowP) {
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
......@@ -4689,7 +4705,7 @@ TEST(RunInt32AddWithOverflowImm) {
Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
......@@ -4702,7 +4718,7 @@ TEST(RunInt32AddWithOverflowImm) {
Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
......@@ -4716,7 +4732,7 @@ TEST(RunInt32AddWithOverflowImm) {
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
......@@ -4756,7 +4772,7 @@ TEST(RunInt32SubWithOverflowP) {
Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
......@@ -4777,7 +4793,7 @@ TEST(RunInt32SubWithOverflowImm) {
Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
......@@ -4790,7 +4806,7 @@ TEST(RunInt32SubWithOverflowImm) {
Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedSubOverflow32(*j, *i, &expected_val);
......@@ -4804,7 +4820,7 @@ TEST(RunInt32SubWithOverflowImm) {
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachInt32, val);
m.StoreToPointer(&actual_val, StoreRepForType(kMachInt32), val);
m.Return(ovf);
int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
......@@ -4861,7 +4877,8 @@ TEST(RunChangeInt32ToInt64P) {
if (kPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.StoreToPointer(&actual, kMachInt64, m.ChangeInt32ToInt64(m.Parameter(0)));
m.StoreToPointer(&actual, StoreRepForType(kMachInt64),
m.ChangeInt32ToInt64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_INT32_INPUTS(i) {
int64_t expected = *i;
......@@ -4875,7 +4892,7 @@ TEST(RunChangeUint32ToUint64P) {
if (kPointerSize < 8) return;
int64_t actual = -1;
RawMachineAssemblerTester<int32_t> m(kMachUint32);
m.StoreToPointer(&actual, kMachUint64,
m.StoreToPointer(&actual, StoreRepForType(kMachUint64),
m.ChangeUint32ToUint64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_UINT32_INPUTS(i) {
......@@ -4976,7 +4993,7 @@ TEST(RunChangeFloat32ToFloat64) {
float expected = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&actual, kMachFloat64,
&actual, StoreRepForType(kMachFloat64),
m.ChangeFloat32ToFloat64(m.LoadFromPointer(&expected, kMachFloat32)));
m.Return(m.Int32Constant(0));
FOR_FLOAT32_INPUTS(i) {
......@@ -5001,8 +5018,8 @@ TEST(RunChangeFloat32ToFloat64_spilled) {
}
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(i * 8),
m.ChangeFloat32ToFloat64(input_node[i]));
m.Store(StoreRepForType(kMachFloat64), m.PointerConstant(&result),
m.Int32Constant(i * 8), m.ChangeFloat32ToFloat64(input_node[i]));
}
m.Return(m.Int32Constant(magic));
......@@ -5024,7 +5041,7 @@ TEST(RunTruncateFloat64ToFloat32) {
double input = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&actual, kMachFloat32,
&actual, StoreRepForType(kMachFloat32),
m.TruncateFloat64ToFloat32(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
......@@ -5041,7 +5058,8 @@ TEST(RunFloat32Constant) {
float expected = *i;
float actual = *i;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(&actual, kMachFloat32, m.Float32Constant(expected));
m.StoreToPointer(&actual, StoreRepForType(kMachFloat32),
m.Float32Constant(expected));
m.Return(m.Int32Constant(0));
CHECK_EQ(0, m.Call());
CHECK_EQ(expected, actual);
......@@ -5078,7 +5096,7 @@ TEST(RunFloat64InsertLowWord32) {
uint64_t result = 0;
RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.StoreToPointer(
&result, kMachFloat64,
&result, StoreRepForType(kMachFloat64),
m.Float64InsertLowWord32(m.LoadFromPointer(&input, kMachFloat64),
m.Parameter(0)));
m.Return(m.Int32Constant(0));
......@@ -5099,7 +5117,7 @@ TEST(RunFloat64InsertHighWord32) {
uint64_t result = 0;
RawMachineAssemblerTester<int32_t> m(kMachInt32);
m.StoreToPointer(
&result, kMachFloat64,
&result, StoreRepForType(kMachFloat64),
m.Float64InsertHighWord32(m.LoadFromPointer(&input, kMachFloat64),
m.Parameter(0)));
m.Return(m.Int32Constant(0));
......@@ -5119,7 +5137,7 @@ TEST(RunFloat32Abs) {
float input = -1.0;
float result = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(&result, kMachFloat32,
m.StoreToPointer(&result, StoreRepForType(kMachFloat32),
m.Float32Abs(m.LoadFromPointer(&input, kMachFloat32)));
m.Return(m.Int32Constant(0));
FOR_FLOAT32_INPUTS(i) {
......@@ -5135,7 +5153,7 @@ TEST(RunFloat64Abs) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(&result, kMachFloat64,
m.StoreToPointer(&result, StoreRepForType(kMachFloat64),
m.Float64Abs(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
......@@ -5250,7 +5268,7 @@ TEST(RunFloat64RoundDown1) {
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
if (!m.machine()->Float64RoundDown().IsSupported()) return;
m.StoreToPointer(&result, kMachFloat64,
m.StoreToPointer(&result, StoreRepForType(kMachFloat64),
m.Float64RoundDown(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
......@@ -5267,7 +5285,7 @@ TEST(RunFloat64RoundDown2) {
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
if (!m.machine()->Float64RoundDown().IsSupported()) return;
m.StoreToPointer(&result, kMachFloat64,
m.StoreToPointer(&result, StoreRepForType(kMachFloat64),
m.Float64Sub(m.Float64Constant(-0.0),
m.Float64RoundDown(m.Float64Sub(
m.Float64Constant(-0.0),
......@@ -5288,7 +5306,7 @@ TEST(RunFloat64RoundTruncate) {
RawMachineAssemblerTester<int32_t> m;
if (!m.machine()->Float64RoundTruncate().IsSupported()) return;
m.StoreToPointer(
&result, kMachFloat64,
&result, StoreRepForType(kMachFloat64),
m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
......@@ -5306,7 +5324,7 @@ TEST(RunFloat64RoundTiesAway) {
RawMachineAssemblerTester<int32_t> m;
if (!m.machine()->Float64RoundTiesAway().IsSupported()) return;
m.StoreToPointer(
&result, kMachFloat64,
&result, StoreRepForType(kMachFloat64),
m.Float64RoundTiesAway(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
......@@ -5446,7 +5464,7 @@ TEST(RunBitcastInt64ToFloat64) {
double output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&output, kMachFloat64,
&output, StoreRepForType(kMachFloat64),
m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, kMachInt64)));
m.Return(m.Int32Constant(11));
FOR_INT64_INPUTS(i) {
......@@ -5463,7 +5481,7 @@ TEST(RunBitcastFloat64ToInt64) {
int64_t output = 0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&output, kMachInt64,
&output, StoreRepForType(kMachInt64),
m.BitcastFloat64ToInt64(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(11));
FOR_FLOAT64_INPUTS(i) {
......@@ -5493,7 +5511,7 @@ TEST(RunBitcastInt32ToFloat32) {
float output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
&output, kMachFloat32,
&output, StoreRepForType(kMachFloat32),
m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, kMachInt32)));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
......
......@@ -361,7 +361,8 @@ class ArgsBuffer {
Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
Node* base = raw.PointerConstant(&output);
Node* offset = raw.Int32Constant(0);
return raw.Store(MachineTypeForC<CType>(), base, offset, value);
return raw.Store(StoreRepresentationForC<CType>(kNoWriteBarrier), base,
offset, value);
}
// Computes the next set of inputs by updating the {input} array.
......@@ -573,7 +574,8 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Node* base = raw.PointerConstant(output);
for (int i = 0; i < kNumParams; i++) {
Node* offset = raw.Int32Constant(i * sizeof(int32_t));
raw.Store(kMachInt32, base, offset, raw.Parameter(i));
raw.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier), base, offset,
raw.Parameter(i));
}
raw.Return(raw.Int32Constant(42));
inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
......@@ -1141,7 +1143,8 @@ void MixedParamTest(int start) {
}
Node* call = raw.CallN(desc, target, args);
Node* store = raw.StoreToPointer(output, sig->GetReturn(), call);
StoreRepresentation store_rep(sig->GetReturn(), kNoWriteBarrier);
Node* store = raw.StoreToPointer(output, store_rep, call);
USE(store);
expected_ret = static_cast<int32_t>(constant);
raw.Return(raw.Int32Constant(expected_ret));
......
......@@ -35,7 +35,6 @@ class BytecodeGeneratorHelper {
Isolate* isolate() { return CcTest::i_isolate(); }
Factory* factory() { return CcTest::i_isolate()->factory(); }
Handle<BytecodeArray> MakeTopLevelBytecode(const char* source) {
const char* old_ignition_filter = i::FLAG_ignition_filter;
i::FLAG_ignition_filter = "*";
......@@ -45,7 +44,6 @@ class BytecodeGeneratorHelper {
return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
}
Handle<BytecodeArray> MakeBytecode(const char* script,
const char* function_name) {
CompileRun(script);
......@@ -56,7 +54,6 @@ class BytecodeGeneratorHelper {
return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
}
Handle<BytecodeArray> MakeBytecodeForFunctionBody(const char* body) {
ScopedVector<char> program(1024);
SNPrintF(program, "function %s() { %s }\n%s();", kFunctionName, body,
......@@ -69,6 +66,17 @@ class BytecodeGeneratorHelper {
SNPrintF(program, "%s\n%s();", function, kFunctionName);
return MakeBytecode(program.start(), kFunctionName);
}
Handle<BytecodeArray> MakeBytecodeForFunctionNoFilter(const char* function) {
const char* old_ignition_filter = i::FLAG_ignition_filter;
i::FLAG_ignition_filter = "*";
ScopedVector<char> program(1024);
SNPrintF(program, "%s\n%s();", function, kFunctionName);
Handle<BytecodeArray> return_val =
MakeBytecode(program.start(), kFunctionName);
i::FLAG_ignition_filter = old_ignition_filter;
return return_val;
}
};
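MakeBytecodeForFunctionNoFilter exists because the nested closures in the new
snippets (Outer/Inner) would otherwise be skipped by the ignition filter; it
widens --ignition-filter to "*" for the duration of the compile and then
restores the previous value. Used below as:

  Handle<BytecodeArray> bytecode_array =
      helper.MakeBytecodeForFunctionNoFilter(snippets[i].code_snippet);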
......@@ -2909,6 +2917,69 @@ TEST(ContextParameters) {
}
TEST(OuterContextVariables) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
int context = Register::function_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
ExpectedSnippet<InstanceType> snippets[] = {
{"function Outer() {"
" var outerVar = 1;"
" function Inner(innerArg) {"
" this.innerFunc = function() { return outerVar * innerArg; }"
" }"
" this.getInnerFunc = function() { return new Inner(1).innerFunc; }"
"}"
"var f = new Outer().getInnerFunc();",
2 * kPointerSize,
1,
20,
{
B(Ldar), R(context), //
B(Star), R(0), //
B(LdaContextSlot), R(0), U8(Context::PREVIOUS_INDEX), //
B(Star), R(0), //
B(LdaContextSlot), R(0), U8(first_context_slot), //
B(Star), R(1), //
B(LdaContextSlot), R(context), U8(first_context_slot), //
B(Mul), R(1), //
B(Return), //
}},
{"function Outer() {"
" var outerVar = 1;"
" function Inner(innerArg) {"
" this.innerFunc = function() { outerVar = innerArg; }"
" }"
" this.getInnerFunc = function() { return new Inner(1).innerFunc; }"
"}"
"var f = new Outer().getInnerFunc();",
2 * kPointerSize,
1,
21,
{
B(LdaContextSlot), R(context), U8(first_context_slot), //
B(Star), R(0), //
B(Ldar), R(context), //
B(Star), R(1), //
B(LdaContextSlot), R(1), U8(Context::PREVIOUS_INDEX), //
B(Star), R(1), //
B(Ldar), R(0), //
B(StaContextSlot), R(1), U8(first_context_slot), //
B(LdaUndefined), //
B(Return), //
}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecodeForFunctionNoFilter(snippets[i].code_snippet);
CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
TEST(CountOperators) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
......
......@@ -58,7 +58,8 @@ class InterpreterTester {
public:
InterpreterTester(Isolate* isolate, const char* source,
MaybeHandle<BytecodeArray> bytecode,
MaybeHandle<TypeFeedbackVector> feedback_vector)
MaybeHandle<TypeFeedbackVector> feedback_vector,
const char* filter)
: isolate_(isolate),
source_(source),
bytecode_(bytecode),
......@@ -70,7 +71,7 @@ class InterpreterTester {
// Set ignition filter flag via SetFlagsFromString to avoid double-free
// (or potential leak with StrDup() based on ownership confusion).
ScopedVector<char> ignition_filter(64);
SNPrintF(ignition_filter, "--ignition-filter=%s", kFunctionName);
SNPrintF(ignition_filter, "--ignition-filter=%s", filter);
FlagList::SetFlagsFromString(ignition_filter.start(),
ignition_filter.length());
// Ensure handler table is generated.
......@@ -79,13 +80,16 @@ class InterpreterTester {
InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode,
MaybeHandle<TypeFeedbackVector> feedback_vector =
MaybeHandle<TypeFeedbackVector>())
: InterpreterTester(isolate, nullptr, bytecode, feedback_vector) {}
MaybeHandle<TypeFeedbackVector>(),
const char* filter = kFunctionName)
: InterpreterTester(isolate, nullptr, bytecode, feedback_vector, filter) {
}
InterpreterTester(Isolate* isolate, const char* source)
InterpreterTester(Isolate* isolate, const char* source,
const char* filter = kFunctionName)
: InterpreterTester(isolate, source, MaybeHandle<BytecodeArray>(),
MaybeHandle<TypeFeedbackVector>()) {}
MaybeHandle<TypeFeedbackVector>(), filter) {}
virtual ~InterpreterTester() {}
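The new filter parameter serves the same purpose for the interpreter tests:
passing "*" lets every function in the source run through the interpreter,
not just kFunctionName. A typical use, as in the
InterpreterOuterContextVariables test below:

  InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
  auto callable = tester.GetCallable<>();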
......@@ -1919,6 +1923,39 @@ TEST(InterpreterContextParameters) {
}
TEST(InterpreterOuterContextVariables) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> context_vars[2] = {
std::make_pair("return outerVar * innerArg;",
handle(Smi::FromInt(200), isolate)),
std::make_pair("outerVar = innerArg; return outerVar",
handle(Smi::FromInt(20), isolate)),
};
std::string header(
"function Outer() {"
" var outerVar = 10;"
" function Inner(innerArg) {"
" this.innerFunc = function() { ");
std::string footer(
" }}"
" this.getInnerFunc = function() { return new Inner(20).innerFunc; }"
"}"
"var f = new Outer().getInnerFunc();");
for (size_t i = 0; i < arraysize(context_vars); i++) {
std::string source = header + context_vars[i].first + footer;
InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*context_vars[i].second));
}
}
TEST(InterpreterComma) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
......
......@@ -1320,7 +1320,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -1335,8 +1336,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......
......@@ -2156,7 +2156,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -2171,8 +2172,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......
......@@ -249,7 +249,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -263,7 +264,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -284,8 +286,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -319,7 +321,8 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
Node* load = m->Load(kMachInt32, base, load_index);
m->Store(kMachInt32, base, store_index, load);
m->Store(StoreRepresentation(kMachInt32, kNoWriteBarrier), base,
store_index, load);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
......
......@@ -497,8 +497,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
Matcher<Node*> offset =
IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
EXPECT_THAT(store_context_slot,
m.IsStore(StoreRepresentation(kMachAnyTagged, kNoWriteBarrier),
EXPECT_THAT(
store_context_slot,
m.IsStore(StoreRepresentation(kMachAnyTagged, kFullWriteBarrier),
context, offset, value));
}
}
......
......@@ -646,7 +646,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -696,8 +697,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -748,8 +749,8 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
......
......@@ -649,7 +649,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -697,8 +698,8 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......@@ -746,8 +747,8 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
m.Parameter(1));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Int32Constant(index), m.Parameter(1));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
......
......@@ -124,7 +124,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
StoreRepresentation store_rep(memacc.type, kNoWriteBarrier);
m.Store(store_rep, m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
......