Commit bd21c2bd authored by Daniel Clifford, committed by Commit Bot

[ignition] Optimize reloading of registers before Dispatch

Before this patch, the registers needed for bytecode dispatch in interpreter
handlers were inconsistently stored in the interpreter frame and/or kept in
values that remained live across calls.

After this patch, these registers are explicitly reloaded after calls, making it
possible to elide the spills of those registers before the call in many cases.

Some highlights from the CL:

* Added methods to the CSA and InterpreterAssembler to efficiently store and
  load Smi values and Smi interpreter registers on x64 without explicit
  tagging/untagging (see the sketch just after this list).

* Created Variables for all of the interpreter-internal values that need to be
  reloaded before bytecode dispatch at the end of an interpreter handler.

* The bytecode offset can be written out early in a handler by marking it
  as having a call along its critical path. By moving this spill early in a
  handler, it becomes possible to use memory operands for the pushes used to
  marshal parameters when making calls.
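
The Smi trick in the first point can be illustrated outside of V8. On x64, a
Smi is its 32-bit payload shifted into the upper word of the tagged value,
with the low word (including the tag bit) all zero, so a tagged store can be
split into two 32-bit stores with no shift at all. A minimal stand-alone
sketch, assuming a little-endian 64-bit host (illustration only, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int32_t payload = -42;
      // Tagging on x64: payload goes to the upper 32 bits, lower 32 bits are 0.
      uint64_t tagged =
          static_cast<uint64_t>(static_cast<uint32_t>(payload)) << 32;

      // The same value written as two 32-bit halves, as StoreAndTagSmi does
      // below: a zero word at the low address, the raw payload word above it.
      unsigned char slot[8];
      const uint32_t zero = 0;
      std::memcpy(slot, &zero, 4);
      std::memcpy(slot + 4, &payload, 4);

      uint64_t reread;
      std::memcpy(&reread, slot, 8);
      assert(reread == tagged);  // the two 32-bit stores rebuilt the tagged word
      return 0;
    }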

Change-Id: Icf8d7798789f88a4489e06a7092616bbbb881577
Reviewed-on: https://chromium-review.googlesource.com/442566
Commit-Queue: Daniel Clifford <danno@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#43260}
parent ad2a30a9
@@ -969,6 +969,24 @@ Node* CodeStubAssembler::LoadAndUntagToWord32Root(
  }
}

Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
  if (Is64()) {
    int zero_offset = offset + kPointerSize / 2;
    int payload_offset = offset;
#if V8_TARGET_LITTLE_ENDIAN
    std::swap(zero_offset, payload_offset);
#endif
    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                        IntPtrConstant(zero_offset), Int32Constant(0));
    return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
                               IntPtrConstant(payload_offset),
                               TruncateInt64ToInt32(value));
  } else {
    return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
                               IntPtrConstant(offset), SmiTag(value));
  }
}

Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
  return LoadObjectField(object, HeapNumber::kValueOffset,
                         MachineType::Float64());
@@ -338,6 +338,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
  // Load a SMI root, untag it, and convert to Word32.
  Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);

  // Tag a smi and store it.
  Node* StoreAndTagSmi(Node* base, int offset, Node* value);

  // Load the floating point value of a HeapNumber.
  Node* LoadHeapNumberValue(Node* object);
  // Load the Map of a HeapObject.
@@ -336,6 +336,11 @@ void InstructionSelector::VisitStore(Node* node) {
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
        (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
        CanCover(node, value)) {
      value = value->InputAt(0);
    }
    InstructionOperand value_operand =
        g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
    inputs[input_count++] = value_operand;
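
The guard added above folds away an explicit TruncateInt64ToInt32 whose only
use is a store narrower than a pointer: a 32-bit store writes only the low 32
bits of its input anyway, so truncating first is redundant. A stand-alone
sketch of that equivalence (assumes a little-endian host; illustration only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t wide = 0x123456789abcdef0LL;
      int32_t truncated = static_cast<int32_t>(wide);  // explicit truncation

      uint32_t stored_truncated, stored_wide_low;
      std::memcpy(&stored_truncated, &truncated, 4);
      std::memcpy(&stored_wide_low, &wide, 4);  // what a 32-bit store writes

      assert(stored_truncated == stored_wide_low);  // truncation changed nothing
      return 0;
    }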
@@ -582,7 +582,7 @@ void InterpreterDispatchDescriptor::InitializePlatformIndependent(
  // kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
  MachineType machine_types[] = {
      MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
      MachineType::AnyTagged()};
      MachineType::IntPtr()};
  data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                      machine_types);
}
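
For orientation, these four descriptor slots are the values every bytecode
handler receives and passes on again at dispatch; the change above switches
kDispatchTable from a tagged value to a raw pointer. A simplified sketch of
the handler signature the descriptor implies (hypothetical, for illustration):

    // handler(accumulator,       // AnyTagged: may move under GC
    //         bytecode_offset,   // IntPtr: raw word offset, not a Smi
    //         bytecode_array,    // AnyTagged: may move under GC
    //         dispatch_table)    // IntPtr: raw external pointer, GC-invisible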
@@ -166,6 +166,19 @@ bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
  return false;
}

bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
  if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true;
  switch (bytecode) {
    case Bytecode::kCreateWithContext:
    case Bytecode::kCreateBlockContext:
    case Bytecode::kCreateCatchContext:
    case Bytecode::kCreateRegExpLiteral:
      return true;
    default:
      return false;
  }
}

// static
bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
  switch (operand_type) {
@@ -619,7 +619,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
  // Returns true if the bytecode is a call or a constructor call.
  static constexpr bool IsCallOrConstruct(Bytecode bytecode) {
    return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
           bytecode == Bytecode::kTailCall || bytecode == Bytecode::kConstruct;
           bytecode == Bytecode::kTailCall ||
           bytecode == Bytecode::kConstruct ||
           bytecode == Bytecode::kCallWithSpread ||
           bytecode == Bytecode::kConstructWithSpread ||
           bytecode == Bytecode::kInvokeIntrinsic ||
           bytecode == Bytecode::kCallJSRuntime;
  }
  // Returns true if the bytecode is a call to the runtime.

@@ -724,6 +729,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
  // Returns the equivalent jump bytecode without the accumulator coercion.
  static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);

  // Returns true if there is a call in the most-frequently executed path
  // through the bytecode's handler.
  static bool MakesCallAlongCriticalPath(Bytecode bytecode);

  // Returns true if the bytecode is a debug break.
  static bool IsDebugBreak(Bytecode bytecode);
@@ -31,14 +31,23 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
      operand_scale_(operand_scale),
      bytecode_offset_(this, MachineType::PointerRepresentation()),
      interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
      bytecode_array_(this, MachineRepresentation::kTagged),
      dispatch_table_(this, MachineType::PointerRepresentation()),
      accumulator_(this, MachineRepresentation::kTagged),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      saved_bytecode_offset_(false),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
  accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
  bytecode_offset_.Bind(
      Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
  bytecode_array_.Bind(
      Parameter(InterpreterDispatchDescriptor::kBytecodeArray));
  dispatch_table_.Bind(
      Parameter(InterpreterDispatchDescriptor::kDispatchTable));
  if (FLAG_trace_ignition) {
    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
  }
@@ -57,6 +66,10 @@ InterpreterAssembler::~InterpreterAssembler() {
Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}
@@ -151,21 +164,33 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
}

Node* InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
  }
  return bytecode_offset_.value();
}

Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  if (made_call_) {
    // If we have made a call, restore bytecode array from stack frame in case
    // the debugger has swapped us to the patched debugger bytecode array.
    return LoadRegister(Register::bytecode_array());
  } else {
    return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
  // Force a re-load of the bytecode array after every call in case the
  // debugger has been activated.
  if (made_call_ &&
      (bytecode_array_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeArray))) {
    bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
  }
  return bytecode_array_.value();
}

Node* InterpreterAssembler::DispatchTableRawPointer() {
  return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_.Bind(ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate())));
  }
  return dispatch_table_.value();
}
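
The three accessors above share one pattern: each value lives in a CSA
Variable that starts out bound to the incoming parameter, and the first read
after a call rebinds it to a freshly reloaded value. Since every post-call
use goes through the reload, the parameter value is dead across the call and
its spill can be elided. A minimal sketch of the pattern in plain C++
(hypothetical names standing in for Variable/Parameter; illustration only):

    #include <cstdint>

    struct LazyReloaded {
      intptr_t value;            // starts as the incoming parameter
      bool is_parameter = true;  // still bound to the parameter?

      // Read the value; after a call the parameter copy is not kept live,
      // so the first post-call read refetches it from its home location.
      intptr_t Get(bool made_call, intptr_t (*reload)()) {
        if (made_call && is_parameter) {
          value = reload();      // e.g. re-read the frame slot
          is_parameter = false;  // reload at most once per handler
        }
        return value;
      }
    };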
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
@@ -187,6 +212,11 @@ Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
                      RegisterFrameOffset(reg_index));
}

Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  return LoadAndUntagSmi(GetInterpretedFramePointer(),
                         reg.ToOperand() << kPointerSizeLog2);
}
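
LoadAndUntagSmi, used here, is the load-side half of the same idea as
StoreAndTagSmi: on x64 it can read just the payload half of the slot with a
32-bit load and sign-extend, instead of loading the full word and shifting.
A stand-alone sketch, again assuming a little-endian host (illustration only):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // A tagged Smi word holding payload -7 (payload in the upper half).
      uint64_t tagged = static_cast<uint64_t>(static_cast<uint32_t>(-7)) << 32;

      // Full-word untag: 8-byte load, then arithmetic shift right by 32.
      int64_t via_shift = static_cast<int64_t>(tagged) >> 32;

      // Half-word untag: 4-byte load of the upper half, then sign-extend.
      int32_t upper;
      std::memcpy(&upper, reinterpret_cast<char*>(&tagged) + 4, 4);
      assert(static_cast<int64_t>(upper) == via_shift);
      return 0;
    }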
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  return StoreNoWriteBarrier(
      MachineRepresentation::kTagged, GetInterpretedFramePointer(),

@@ -199,6 +229,12 @@ Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
                             RegisterFrameOffset(reg_index), value);
}

Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
                                                Register reg) {
  int offset = reg.ToOperand() << kPointerSizeLog2;
  return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}
Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));

@@ -472,8 +508,20 @@ Node* InterpreterAssembler::LoadFeedbackVector() {
  return vector;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
  saved_bytecode_offset_ = true;
}

void InterpreterAssembler::CallPrologue() {
  StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
  if (!saved_bytecode_offset_) {
    // If there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been
    // called in a path that dominates _all_ of those calls. Therefore don't
    // set saved_bytecode_offset_ to true or call SaveBytecodeOffset.
    StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
  }

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK(stack_pointer_before_call_ == nullptr);
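
The interplay between SaveBytecodeOffset and CallPrologue is where the spill
savings come from. When the offset is saved once at handler entry, that store
dominates every later call and each CallPrologue can skip its spill; without
it, every call site must spill. A sketch of the two shapes (pseudo-handler,
hypothetical names; illustration only):

    // Handler for a bytecode where MakesCallAlongCriticalPath() is true:
    //   SaveBytecodeOffset();   // one store, dominates all calls below
    //   ...
    //   CallRuntime(...);       // prologue sees saved_bytecode_offset_,
    //   CallRuntime(...);       // so neither call re-spills
    //
    // Handler where the call is off the critical path:
    //   ...
    //   if (rare_case) CallRuntime(...);  // prologue must spill here, and at
    //                                     // every other call site it reaches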
@@ -518,6 +566,8 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
@@ -675,6 +725,8 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
                                   Node* first_arg, Node* arg_count,
                                   TailCallMode tail_call_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
  Callable callable = CodeFactory::InterpreterPushArgsAndCall(
      isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
  Node* code_target = HeapConstant(callable.code());
@@ -685,6 +737,7 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
                                             Node* first_arg, Node* arg_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Callable callable = CodeFactory::InterpreterPushArgsAndCall(
      isolate(), TailCallMode::kDisallow,
      InterpreterPushArgsMode::kWithFinalSpread);
@@ -698,6 +751,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
                                      Node* new_target, Node* first_arg,
                                      Node* arg_count, Node* slot_id,
                                      Node* feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Variable return_value(this, MachineRepresentation::kTagged);
  Variable allocation_feedback(this, MachineRepresentation::kTagged);
  Label call_construct_function(this, &allocation_feedback),
@@ -849,6 +903,7 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
                                                Node* context, Node* new_target,
                                                Node* first_arg,
                                                Node* arg_count) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  Variable return_value(this, MachineRepresentation::kTagged);
  Comment("call using ConstructWithSpread");
  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
@@ -864,6 +919,8 @@ Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                         Node* first_arg, Node* arg_count,
                                         int result_size) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  Node* code_target = HeapConstant(callable.code());
@@ -1014,6 +1071,7 @@ void InterpreterAssembler::InlineStar() {
Node* InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* target_offset = Advance();
  Node* target_bytecode = LoadBytecode(target_offset);
@@ -1061,6 +1119,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Indices 0-255 correspond to bytecodes with operand_scale == 0
  // Indices 256-511 correspond to bytecodes with operand_scale == 1
  // Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* next_bytecode_offset = Advance(1);
  Node* next_bytecode = LoadBytecode(next_bytecode_offset);
@@ -88,9 +88,11 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadRegister(compiler::Node* reg_index);
  compiler::Node* LoadAndUntagRegister(Register reg);
  compiler::Node* StoreRegister(compiler::Node* value, Register reg);
  compiler::Node* StoreRegister(compiler::Node* value,
                                compiler::Node* reg_index);
  compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);

  // Returns the next consecutive register.
  compiler::Node* NextRegister(compiler::Node* reg_index);
@@ -221,6 +223,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
  compiler::Node* BytecodeOffset();

  // Save the bytecode offset to the interpreter frame.
  void SaveBytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();
@@ -335,9 +340,13 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  OperandScale operand_scale_;
  CodeStubAssembler::Variable bytecode_offset_;
  CodeStubAssembler::Variable interpreted_frame_pointer_;
  CodeStubAssembler::Variable bytecode_array_;
  CodeStubAssembler::Variable dispatch_table_;
  CodeStubAssembler::Variable accumulator_;
  AccumulatorUse accumulator_use_;
  bool made_call_;
  bool reloaded_frame_ptr_;
  bool saved_bytecode_offset_;
  bool disable_stack_check_across_call_;
  compiler::Node* stack_pointer_before_call_;
@@ -139,6 +139,9 @@ void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
      isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
      Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
  InterpreterAssembler assembler(&state, bytecode, operand_scale);
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
    assembler.SaveBytecodeOffset();
  }
  (this->*generator)(&assembler);
  Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
@@ -313,61 +313,6 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
  return nullptr;
}
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
  TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
    InterpreterAssemblerTestState state(this, bytecode);
    InterpreterAssemblerForTest m(&state, bytecode);
    Node* tail_call_node = m.Dispatch();

    OperandScale operand_scale = OperandScale::kSingle;
    Matcher<Node*> next_bytecode_offset_matcher =
        IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
                    IsIntPtrConstant(
                        interpreter::Bytecodes::Size(bytecode, operand_scale)));
    Matcher<Node*> target_bytecode_matcher =
        m.IsLoad(MachineType::Uint8(),
                 IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
                 next_bytecode_offset_matcher);
    target_bytecode_matcher = IsChangeUint32ToWord(target_bytecode_matcher);
    Matcher<Node*> code_target_matcher = m.IsLoad(
        MachineType::Pointer(),
        IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
        IsWordShl(target_bytecode_matcher, IsIntPtrConstant(kPointerSizeLog2)));

    if (interpreter::Bytecodes::IsStarLookahead(bytecode, operand_scale)) {
      Matcher<Node*> after_lookahead_offset =
          IsIntPtrAdd(next_bytecode_offset_matcher,
                      IsIntPtrConstant(interpreter::Bytecodes::Size(
                          Bytecode::kStar, operand_scale)));
      next_bytecode_offset_matcher =
          IsPhi(MachineType::PointerRepresentation(),
                next_bytecode_offset_matcher, after_lookahead_offset, _);
      Matcher<Node*> after_lookahead_bytecode =
          m.IsLoad(MachineType::Uint8(),
                   IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
                   after_lookahead_offset);
      after_lookahead_bytecode = IsChangeUint32ToWord(after_lookahead_bytecode);
      target_bytecode_matcher =
          IsPhi(MachineType::PointerRepresentation(), target_bytecode_matcher,
                after_lookahead_bytecode, _);
      code_target_matcher =
          m.IsLoad(MachineType::Pointer(),
                   IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
                   IsWordShl(target_bytecode_matcher,
                             IsIntPtrConstant(kPointerSizeLog2)));
    }

    EXPECT_THAT(
        tail_call_node,
        IsTailCall(_, code_target_matcher,
                   IsParameter(InterpreterDispatchDescriptor::kAccumulator),
                   next_bytecode_offset_matcher,
                   IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
                   IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
                   _, _));
  }
}
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
  // If debug code is enabled we emit extra code in Jump.
  if (FLAG_debug_code) return;
@@ -470,34 +415,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
  }
}
TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
  TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
    if (!interpreter::Bytecodes::ReadsAccumulator(bytecode) ||
        !interpreter::Bytecodes::WritesAccumulator(bytecode)) {
      continue;
    }

    InterpreterAssemblerTestState state(this, bytecode);
    InterpreterAssemblerForTest m(&state, bytecode);
    // Should be incoming accumulator if not set.
    EXPECT_THAT(m.GetAccumulator(),
                IsParameter(InterpreterDispatchDescriptor::kAccumulator));
    // Should be set by SetAccumulator.
    Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
    m.SetAccumulator(accumulator_value_1);
    EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
    Node* accumulator_value_2 = m.Int32Constant(42);
    m.SetAccumulator(accumulator_value_2);
    EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);

    // Should be passed to next bytecode handler on dispatch.
    Node* tail_call_node = m.Dispatch();
    EXPECT_THAT(tail_call_node,
                IsTailCall(_, _, accumulator_value_2, _, _, _, _));
  }
}
TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
  TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
    InterpreterAssemblerTestState state(this, bytecode);
@@ -666,30 +583,33 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
  const int kResultSizes[] = {1, 2};
  TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
    TRACED_FOREACH(int, result_size, kResultSizes) {
      InterpreterAssemblerTestState state(this, bytecode);
      InterpreterAssemblerForTest m(&state, bytecode);
      Callable builtin = CodeFactory::InterpreterCEntry(isolate(), result_size);
      Node* function_id = m.Int32Constant(0);
      Node* first_arg = m.IntPtrConstant(1);
      Node* arg_count = m.Int32Constant(2);
      Node* context = m.IntPtrConstant(4);

      Matcher<Node*> function_table = IsExternalConstant(
          ExternalReference::runtime_function_table_address(isolate()));
      Matcher<Node*> function = IsIntPtrAdd(
          function_table,
          IsChangeUint32ToWord(IsInt32Mul(
              function_id, IsInt32Constant(sizeof(Runtime::Function)))));
      Matcher<Node*> function_entry =
          m.IsLoad(MachineType::Pointer(), function,
                   IsIntPtrConstant(offsetof(Runtime::Function, entry)));

      Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
                                          arg_count, result_size);
      EXPECT_THAT(call_runtime,
                  IsCall(_, IsHeapConstant(builtin.code()), arg_count,
                         first_arg, function_entry, context, _, _));
      if (Bytecodes::IsCallRuntime(bytecode)) {
        InterpreterAssemblerTestState state(this, bytecode);
        InterpreterAssemblerForTest m(&state, bytecode);
        Callable builtin =
            CodeFactory::InterpreterCEntry(isolate(), result_size);
        Node* function_id = m.Int32Constant(0);
        Node* first_arg = m.IntPtrConstant(1);
        Node* arg_count = m.Int32Constant(2);
        Node* context = m.IntPtrConstant(4);

        Matcher<Node*> function_table = IsExternalConstant(
            ExternalReference::runtime_function_table_address(isolate()));
        Matcher<Node*> function = IsIntPtrAdd(
            function_table,
            IsChangeUint32ToWord(IsInt32Mul(
                function_id, IsInt32Constant(sizeof(Runtime::Function)))));
        Matcher<Node*> function_entry =
            m.IsLoad(MachineType::Pointer(), function,
                     IsIntPtrConstant(offsetof(Runtime::Function, entry)));

        Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
                                            arg_count, result_size);
        EXPECT_THAT(call_runtime,
                    IsCall(_, IsHeapConstant(builtin.code()), arg_count,
                           first_arg, function_entry, context, _, _));
      }
    }
  }
}
@@ -699,18 +619,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
                                        TailCallMode::kAllow};
  TRACED_FOREACH(TailCallMode, tail_call_mode, tail_call_modes) {
    TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
      InterpreterAssemblerTestState state(this, bytecode);
      InterpreterAssemblerForTest m(&state, bytecode);
      Callable builtin = CodeFactory::InterpreterPushArgsAndCall(
          isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
      Node* function = m.IntPtrConstant(0);
      Node* first_arg = m.IntPtrConstant(1);
      Node* arg_count = m.Int32Constant(2);
      Node* context = m.IntPtrConstant(3);
      Node* call_js =
          m.CallJS(function, context, first_arg, arg_count, tail_call_mode);
      EXPECT_THAT(call_js, IsCall(_, IsHeapConstant(builtin.code()), arg_count,
                                  first_arg, function, context, _, _));
      if (Bytecodes::IsCallOrConstruct(bytecode)) {
        InterpreterAssemblerTestState state(this, bytecode);
        InterpreterAssemblerForTest m(&state, bytecode);
        Callable builtin = CodeFactory::InterpreterPushArgsAndCall(
            isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
        Node* function = m.IntPtrConstant(0);
        Node* first_arg = m.IntPtrConstant(1);
        Node* arg_count = m.Int32Constant(2);
        Node* context = m.IntPtrConstant(3);
        Node* call_js =
            m.CallJS(function, context, first_arg, arg_count, tail_call_mode);
        EXPECT_THAT(call_js,
                    IsCall(_, IsHeapConstant(builtin.code()), arg_count,
                           first_arg, function, context, _, _));
      }
    }
  }
}