Commit b62bf1e6 authored by rmcilroy's avatar rmcilroy Committed by Commit bot

[Interpreter] Enable runtime profiler support for Ignition.

Adds a profiling counter to each BytecodeArray object, and adds
code to Jump and Return bytecode handlers to update this
counter by the size of the jump or the distance from the return
to the start of the function. This is more accurate than fullcodegen's
approach since it takes forward jumps into account as well as back-edges.

Modifies RuntimeProfiler to track ticks for interpreted frames.
Currently we use SharedFunctionInfo::profiler_ticks() instead of
adding another tick field to BytecodeArray, to avoid growing that
object. SharedFunctionInfo::profiler_ticks() is otherwise only
used by Crankshaft, so we shouldn't need both.

BUG=v8:4689
LOG=N

Review URL: https://codereview.chromium.org/1707693003

Cr-Commit-Position: refs/heads/master@{#34166}
parent 5d065482
......@@ -908,7 +908,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -917,7 +917,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -39,6 +39,7 @@ class Schedule;
V(Int32Add) \
V(Int32Sub) \
V(Int32Mul) \
V(Int32GreaterThanOrEqual) \
V(WordEqual) \
V(WordNotEqual) \
V(WordOr) \
......
......@@ -480,7 +480,7 @@ Object* StackGuard::HandleInterrupts() {
isolate_->counters()->stack_interrupts()->Increment();
isolate_->counters()->runtime_profiler_ticks()->Increment();
isolate_->runtime_profiler()->OptimizeNow();
isolate_->runtime_profiler()->MarkCandidatesForOptimization();
return isolate_->heap()->undefined_value();
}
......
......@@ -3029,6 +3029,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_length(length);
instance->set_frame_size(frame_size);
instance->set_parameter_count(parameter_count);
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
instance->set_source_position_table(empty_fixed_array());
......
......@@ -115,7 +115,7 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
VisitPointers(
map->GetHeap(), object,
HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
HeapObject::RawField(object, BytecodeArray::kHeaderSize));
HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
}
......@@ -531,7 +531,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
StaticVisitor::VisitPointers(
map->GetHeap(), object,
HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
HeapObject::RawField(object, BytecodeArray::kHeaderSize));
HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
}
......
......@@ -470,7 +470,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -10,6 +10,7 @@
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
......@@ -373,6 +374,36 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
first_arg, function_entry, result_size);
}
// Adds |weight| to the bytecode array's interrupt budget. If the budget
// drops below zero, calls Runtime::kInterrupt and resets the budget to
// Interpreter::InterruptBudget(). |weight| is negative for back-edges and
// returns, which is what eventually triggers the interrupt check.
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
  CodeStubAssembler::Label ok(this);
  CodeStubAssembler::Label interrupt_check(this);
  CodeStubAssembler::Label end(this);
  // Raw field offset of the budget within the BytecodeArray, untagged.
  Node* budget_offset =
      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
  // Update budget by |weight| and check if it reaches zero.
  Node* old_budget =
      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
  Node* new_budget = Int32Add(old_budget, weight);
  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
  Branch(condition, &ok, &interrupt_check);
  // Perform interrupt and reset budget.
  Bind(&interrupt_check);
  CallRuntime(Runtime::kInterrupt, GetContext());
  // No write barrier: the budget is a raw int32 field, not a tagged pointer.
  StoreNoWriteBarrier(MachineRepresentation::kWord32,
                      BytecodeArrayTaggedPointer(), budget_offset,
                      Int32Constant(Interpreter::InterruptBudget()));
  Goto(&end);
  // Update budget.
  Bind(&ok);
  StoreNoWriteBarrier(MachineRepresentation::kWord32,
                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
  Goto(&end);
  Bind(&end);
}
// Returns the current bytecode offset advanced by the constant |delta|.
Node* InterpreterAssembler::Advance(int delta) {
  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
}
......@@ -381,7 +412,10 @@ Node* InterpreterAssembler::Advance(Node* delta) {
return IntPtrAdd(BytecodeOffset(), delta);
}
void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
// Jumps by |delta| bytecodes. The jump distance is first charged against
// the interrupt budget, so backward jumps (negative |delta|) can trigger
// the runtime interrupt check in UpdateInterruptBudget.
void InterpreterAssembler::Jump(Node* delta) {
  UpdateInterruptBudget(delta);
  DispatchTo(Advance(delta));
}
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
CodeStubAssembler::Label match(this);
......@@ -389,7 +423,7 @@ void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Branch(condition, &match, &no_match);
Bind(&match);
DispatchTo(Advance(delta));
Jump(delta);
Bind(&no_match);
Dispatch();
}
......@@ -431,6 +465,17 @@ void InterpreterAssembler::InterpreterReturn() {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
// Update profiling count by -BytecodeOffset to simulate backedge to start of
// function.
Node* profiling_weight =
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
BytecodeOffset());
UpdateInterruptBudget(profiling_weight);
InterpreterDispatchDescriptor descriptor(isolate());
Node* exit_trampoline_code_object =
HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
......
......@@ -156,6 +156,10 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
// Updates the bytecode array's interrupt budget by |weight| and calls
// Runtime::kInterrupt if counter reaches zero.
void UpdateInterruptBudget(compiler::Node* weight);
// Returns the offset of register |index| relative to RegisterFilePointer().
compiler::Node* RegisterFrameOffset(compiler::Node* index);
......
......@@ -49,6 +49,13 @@ void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
&dispatch_table_[0] + kDispatchTableSize);
}
// static
int Interpreter::InterruptBudget() {
  // Scale the generic interrupt budget flag by an Ignition-specific code
  // size multiplier, since bytecode is denser than fullcodegen's code.
  // TODO(ignition): Tune code size multiplier.
  return 32 * FLAG_interrupt_budget;
}
bool Interpreter::MakeBytecode(CompilationInfo* info) {
if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
OFStream os(stdout);
......
......@@ -33,6 +33,9 @@ class Interpreter {
// Initializes the interpreter dispatch table.
void Initialize();
// Returns the interrupt budget which should be used for the profiler counter.
static int InterruptBudget();
// Generate bytecode for |info|.
static bool MakeBytecode(CompilationInfo* info);
......
......@@ -900,7 +900,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -1016,7 +1016,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -3880,6 +3880,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
(number_of_parameters << kPointerSizeLog2));
}
// Returns the remaining interrupt budget, stored as a raw int32 field.
// The interpreter adjusts it on jumps and returns and performs an
// interrupt check when it goes negative.
int BytecodeArray::interrupt_budget() const {
  return READ_INT_FIELD(this, kInterruptBudgetOffset);
}

// Sets the interrupt budget; the budget must be non-negative.
void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
  DCHECK_GE(interrupt_budget, 0);
  WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
}
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
......
......@@ -4451,6 +4451,10 @@ class BytecodeArray : public FixedArrayBase {
inline int parameter_count() const;
inline void set_parameter_count(int number_of_parameters);
// Accessors for profiling count.
inline int interrupt_budget() const;
inline void set_interrupt_budget(int interrupt_budget);
// Accessors for the constant pool.
DECL_ACCESSORS(constant_pool, FixedArray)
......@@ -4477,13 +4481,14 @@ class BytecodeArray : public FixedArrayBase {
void Disassemble(std::ostream& os);
// Layout description.
static const int kFrameSizeOffset = FixedArrayBase::kHeaderSize;
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
static const int kConstantPoolOffset = kParameterSizeOffset + kIntSize;
static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
static const int kSourcePositionTableOffset =
kHandlerTableOffset + kPointerSize;
static const int kHeaderSize = kSourcePositionTableOffset + kPointerSize;
static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
static const int kHeaderSize = kInterruptBudgetOffset + kIntSize;
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
......
......@@ -1027,7 +1027,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -58,10 +58,11 @@ static void GetICCounts(SharedFunctionInfo* shared,
int* ic_with_type_info_count, int* ic_generic_count,
int* ic_total_count, int* type_info_percentage,
int* generic_percentage) {
Code* shared_code = shared->code();
*ic_total_count = 0;
*ic_generic_count = 0;
*ic_with_type_info_count = 0;
if (shared->code()->kind() == Code::FUNCTION) {
Code* shared_code = shared->code();
Object* raw_info = shared_code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
......@@ -69,6 +70,7 @@ static void GetICCounts(SharedFunctionInfo* shared,
*ic_generic_count = info->ic_generic_count();
*ic_total_count = info->ic_total_count();
}
}
// Harvest vector-ics as well
TypeFeedbackVector* vector = shared->feedback_vector();
......@@ -136,45 +138,18 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
}
}
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
if (!isolate_->use_crankshaft()) return;
DisallowHeapAllocation no_gc;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
int frame_count = 0;
int frame_count_limit = FLAG_frame_count;
for (JavaScriptFrameIterator it(isolate_);
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
int frame_count,
bool frame_optimized) {
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
List<JSFunction*> functions(4);
frame->GetFunctions(&functions);
for (int i = functions.length(); --i >= 0; ) {
SharedFunctionInfo* shared_function_info = functions[i]->shared();
int ticks = shared_function_info->profiler_ticks();
if (ticks < Smi::kMaxValue) {
shared_function_info->set_profiler_ticks(ticks + 1);
}
}
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsInOptimizationQueue()) continue;
if (shared_code->kind() != Code::FUNCTION) return;
if (function->IsInOptimizationQueue()) return;
if (FLAG_always_osr) {
AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
} else if (!frame_optimized &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
......@@ -190,7 +165,7 @@ void RuntimeProfiler::OptimizeNow() {
} else {
AttemptOnStackReplacement(function);
}
continue;
return;
}
// Only record top-level code on top of the execution stack and
......@@ -199,7 +174,7 @@ void RuntimeProfiler::OptimizeNow() {
const int kMaxToplevelSourceSize = 10 * 1024;
if (shared->is_toplevel() &&
(frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
continue;
return;
}
// Do not record non-optimizable functions.
......@@ -215,9 +190,9 @@ void RuntimeProfiler::OptimizeNow() {
shared_code->set_profiler_ticks(ticks + 1);
}
}
continue;
return;
}
if (function->IsOptimized()) continue;
if (function->IsOptimized()) return;
int ticks = shared_code->profiler_ticks();
......@@ -257,6 +232,98 @@ void RuntimeProfiler::OptimizeNow() {
} else {
shared_code->set_profiler_ticks(ticks + 1);
}
}
// Decides whether |function|, currently running as interpreted (Ignition)
// code, should be marked for optimization. Uses
// SharedFunctionInfo::profiler_ticks() as the hotness counter (see commit
// message: it is otherwise only used by Crankshaft).
// |frame_optimized| is true when the sampled frame is already optimized.
void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
                                            bool frame_optimized) {
  if (function->IsInOptimizationQueue()) return;
  SharedFunctionInfo* shared = function->shared();
  int ticks = shared->profiler_ticks();
  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
  // than kMaxToplevelSourceSize.
  // TODO(rmcilroy): Consider whether we should optimize small functions when
  // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
  if (!frame_optimized && (function->IsMarkedForOptimization() ||
                           function->IsMarkedForConcurrentOptimization() ||
                           function->IsOptimized())) {
    // TODO(rmcilroy): Support OSR in these cases.
    return;
  }
  // Do not optimize non-optimizable functions.
  if (shared->optimization_disabled()) {
    if (shared->deopt_count() >= FLAG_max_opt_count) {
      // If optimization was disabled due to many deoptimizations,
      // then check if the function is hot and try to reenable optimization.
      if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
        shared->set_profiler_ticks(0);
        shared->TryReenableOptimization();
      }
    }
    return;
  }
  if (function->IsOptimized()) return;
  if (ticks >= kProfilerTicksBeforeOptimization) {
    int typeinfo, generic, total, type_percentage, generic_percentage;
    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
                &generic_percentage);
    if (type_percentage >= FLAG_type_info_threshold &&
        generic_percentage <= FLAG_generic_ic_threshold) {
      // If this particular function hasn't had any ICs patched for enough
      // ticks, optimize it now.
      Optimize(function, "hot and stable");
    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
      // Very hot despite unstable type feedback: optimize anyway.
      Optimize(function, "not much type info but very hot");
    } else {
      if (FLAG_trace_opt_verbose) {
        PrintF("[not yet optimizing ");
        function->PrintName();
        PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
               type_percentage);
      }
    }
  }
}
// Walks the top FLAG_frame_count JavaScript frames, bumps each function's
// profiler tick count, and dispatches to the Ignition or fullcodegen
// optimization heuristic depending on FLAG_ignition. Called from
// StackGuard::HandleInterrupts on a profiler tick.
void RuntimeProfiler::MarkCandidatesForOptimization() {
  HandleScope scope(isolate_);
  // Optimization only makes sense when Crankshaft is available.
  if (!isolate_->use_crankshaft()) return;
  DisallowHeapAllocation no_gc;
  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimizations
  // (eagerly or lazily).
  int frame_count = 0;
  int frame_count_limit = FLAG_frame_count;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done();
       it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    JSFunction* function = frame->function();
    // Bump ticks for every (possibly inlined) function in this frame,
    // saturating at Smi::kMaxValue.
    List<JSFunction*> functions(4);
    frame->GetFunctions(&functions);
    for (int i = functions.length(); --i >= 0; ) {
      SharedFunctionInfo* shared_function_info = functions[i]->shared();
      int ticks = shared_function_info->profiler_ticks();
      if (ticks < Smi::kMaxValue) {
        shared_function_info->set_profiler_ticks(ticks + 1);
      }
    }
    if (FLAG_ignition) {
      MaybeOptimizeIgnition(function, frame->is_optimized());
    } else {
      MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
    }
  }
  // Reset the IC-change flag for the next profiling interval.
  any_ic_changed_ = false;
}
......
......@@ -23,13 +23,16 @@ class RuntimeProfiler {
public:
explicit RuntimeProfiler(Isolate* isolate);
void OptimizeNow();
void MarkCandidatesForOptimization();
void NotifyICChanged() { any_ic_changed_ = true; }
void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
private:
void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
bool frame_optimized);
void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
void Optimize(JSFunction* function, const char* reason);
bool CodeSizeOKForOSR(Code* shared_code);
......
......@@ -541,7 +541,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -599,7 +599,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
......
......@@ -840,9 +840,6 @@
# BytecodeGenerator::VisitFunctionLiteral - !shared_info.is_null().
'regress/regress-crbug-429159': [FAIL],
# TODO(rmcilroy,4680): Check failed: osr_normal_entry.
'regress/regress-123919': [FAIL],
# TODO(rmcilroy,4680): Pass on debug, fail on release.
'compiler/regress-stacktrace-methods': [PASS, ['mode == release', FAIL]],
......
......@@ -223,6 +223,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
}
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
// If debug code is enabled we emit extra code in Jump.
if (FLAG_debug_code) return;
int jump_offsets[] = {-9710, -77, 0, +3, +97109};
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
......@@ -236,10 +239,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(jump_offset));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
......@@ -253,9 +254,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(
InterpreterDispatchDescriptor::kRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(
InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher, _,
IsParameter(
InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
......@@ -267,6 +266,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
static const int kJumpIfTrueOffset = 73;
// If debug code is enabled we emit extra code in Jump.
if (FLAG_debug_code) return;
MachineOperatorBuilder machine(zone());
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
......@@ -284,10 +286,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsInt32Constant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher = m.IsLoad(
MachineType::Uint8(),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher);
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
......@@ -300,9 +300,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
IsParameter(
InterpreterDispatchDescriptor::kRegisterFileParameter),
next_bytecode_offset_matcher,
IsParameter(
InterpreterDispatchDescriptor::kBytecodeArrayParameter),
next_bytecode_offset_matcher, _,
IsParameter(
InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
......@@ -314,6 +312,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
}
TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
// If debug code is enabled we emit extra code in InterpreterReturn.
if (FLAG_debug_code) return;
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.InterpreterReturn();
......@@ -333,7 +334,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsParameter(
InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
_,
IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
_));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment