Commit 40c78728 authored by rmcilroy, committed by Commit bot

[Interpreter] Make fast paths of StackCheck, Jump, Return and ForInNext not build a frame.

Tweaks the generated code for a number of bytecode handlers. In particular, uses
deferred labels (now that they exist) to ensure that fast paths don't build frames
where possible. This improves the codegen for StackCheck, Jump, Return and ForInNext.
Also tweaks the codegen for CreateArguments, LogicalNot and ForInDone.

Seems to give a ~5-8% performance boost on Octane.
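The core pattern is sketched below, modelled on the DoStackCheck changes in this
CL (DoExampleCheck is a hypothetical handler name used only for illustration).
Branching to a Label created with Label::kDeferred lets the code generator emit
the slow-path block out of line, so the common case dispatches without
constructing a frame:

  // Sketch of the deferred-label pattern used throughout this CL.
  void Interpreter::DoExampleCheck(InterpreterAssembler* assembler) {
    Label ok(assembler), interrupt(assembler, Label::kDeferred);
    Node* triggered = __ StackCheckTriggeredInterrupt();
    __ BranchIf(triggered, &interrupt, &ok);

    // Fast path: no runtime call, so no frame needs to be built here.
    __ Bind(&ok);
    __ Dispatch();

    // Deferred slow path: emitted out of line; only this block pays the
    // cost of the runtime call and its frame.
    __ Bind(&interrupt);
    {
      __ CallRuntime(Runtime::kStackGuard, __ GetContext());
      __ Dispatch();
    }
  }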

BUG=v8:4280
LOG=N

Review-Url: https://codereview.chromium.org/1973873004
Cr-Commit-Position: refs/heads/master@{#36236}
parent c9a83150
@@ -56,6 +56,7 @@ class Schedule;
   V(IntPtrGreaterThanOrEqual)  \
   V(IntPtrEqual)               \
   V(Uint32LessThan)            \
+  V(UintPtrLessThan)           \
   V(UintPtrGreaterThanOrEqual) \
   V(WordEqual)                 \
   V(WordNotEqual)              \
...
@@ -460,33 +460,32 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
 }
 
 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label interrupt_check(this);
-  CodeStubAssembler::Label end(this);
+  Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
   Node* budget_offset =
       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
 
   // Update budget by |weight| and check if it reaches zero.
+  Variable new_budget(this, MachineRepresentation::kWord32);
   Node* old_budget =
       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
-  Node* new_budget = Int32Add(old_budget, weight);
-  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+  new_budget.Bind(Int32Add(old_budget, weight));
+  Node* condition =
+      Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
   Branch(condition, &ok, &interrupt_check);
 
   // Perform interrupt and reset budget.
   Bind(&interrupt_check);
-  CallRuntime(Runtime::kInterrupt, GetContext());
-  StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset,
-                      Int32Constant(Interpreter::InterruptBudget()));
-  Goto(&end);
+  {
+    CallRuntime(Runtime::kInterrupt, GetContext());
+    new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
+    Goto(&ok);
+  }
 
   // Update budget.
   Bind(&ok);
   StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
-  Goto(&end);
-  Bind(&end);
+                      BytecodeArrayTaggedPointer(), budget_offset,
+                      new_budget.value());
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
@@ -503,10 +502,9 @@ Node* InterpreterAssembler::Jump(Node* delta) {
 }
 
 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
+  Label match(this), no_match(this);
 
-  Branch(condition, &match, &no_match);
+  BranchIf(condition, &match, &no_match);
   Bind(&match);
   Jump(delta);
   Bind(&no_match);
@@ -618,23 +616,12 @@ compiler::Node* InterpreterAssembler::InterpreterReturn() {
   return DispatchToBytecodeHandler(exit_trampoline_code_object);
 }
 
-void InterpreterAssembler::StackCheck() {
-  CodeStubAssembler::Label end(this);
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label stack_guard(this);
-
+Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
   Node* sp = LoadStackPointer();
   Node* stack_limit = Load(
       MachineType::Pointer(),
       ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
-  Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
-  Branch(condition, &ok, &stack_guard);
-  Bind(&stack_guard);
-  CallRuntime(Runtime::kStackGuard, GetContext());
-  Goto(&end);
-  Bind(&ok);
-  Goto(&end);
-  Bind(&end);
+  return UintPtrLessThan(sp, stack_limit);
 }
 
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -646,18 +633,14 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
-  CodeStubAssembler::Label end(this);
-
-  Node* condition = WordEqual(lhs, rhs);
-  Branch(condition, &match, &no_match);
-  Bind(&no_match);
+  Label ok(this), abort(this, Label::kDeferred);
+  BranchIfWordEqual(lhs, rhs, &ok, &abort);
+
+  Bind(&abort);
   Abort(bailout_reason);
-  Goto(&end);
-  Bind(&match);
-  Goto(&end);
-  Bind(&end);
+  Goto(&ok);
+
+  Bind(&ok);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -677,21 +660,21 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
   Node* old_counter =
       Load(MachineType::IntPtr(), counters_table, counter_offset);
 
-  CodeStubAssembler::Label counter_ok(this);
-  CodeStubAssembler::Label counter_saturated(this);
-  CodeStubAssembler::Label end(this);
+  Label counter_ok(this), counter_saturated(this, Label::kDeferred);
 
   Node* counter_reached_max = WordEqual(
       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
-  Branch(counter_reached_max, &counter_saturated, &counter_ok);
+  BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+
   Bind(&counter_ok);
-  Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
-                      counter_offset, new_counter);
-  Goto(&end);
+  {
+    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
+                        counter_offset, new_counter);
+    Goto(&counter_saturated);
+  }
+
   Bind(&counter_saturated);
-  Goto(&end);
-  Bind(&end);
 }
 
 // static
...
@@ -114,11 +114,6 @@ class InterpreterAssembler : public CodeStubAssembler {
   // Jump relative to the current bytecode by |jump_offset|.
   compiler::Node* Jump(compiler::Node* jump_offset);
 
-  // Jump relative to the current bytecode by |jump_offset| if the
-  // |condition| is true. Helper function for JumpIfWordEqual and
-  // JumpIfWordNotEqual.
-  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
-
   // Jump relative to the current bytecode by |jump_offset| if the
   // word values |lhs| and |rhs| are equal.
   void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -129,8 +124,8 @@ class InterpreterAssembler : public CodeStubAssembler {
   void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           compiler::Node* jump_offset);
 
-  // Perform a stack guard check.
-  void StackCheck();
+  // Returns true if the stack guard check triggers an interrupt.
+  compiler::Node* StackCheckTriggeredInterrupt();
 
   // Returns from the function.
   compiler::Node* InterpreterReturn();
@@ -207,6 +202,11 @@ class InterpreterAssembler : public CodeStubAssembler {
   compiler::Node* BytecodeUnsignedOperand(int operand_index,
                                           OperandSize operand_size);
 
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // |condition| is true. Helper function for JumpIfWordEqual and
+  // JumpIfWordNotEqual.
+  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
   // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
   // update BytecodeOffset() itself.
   compiler::Node* Advance(int delta);
...
@@ -862,21 +862,22 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* context = __ GetContext();
   Node* to_boolean_value =
       __ CallStub(callable.descriptor(), target, context, accumulator);
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
-  Node* condition = __ WordEqual(to_boolean_value, true_value);
-  __ Branch(condition, &if_true, &if_false);
+  __ BranchIfWordEqual(to_boolean_value, true_value, &if_true, &if_false);
   __ Bind(&if_true);
   {
     __ SetAccumulator(false_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
     __ SetAccumulator(true_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
 
 // TypeOf
@@ -1453,10 +1454,8 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
   Node* closure = __ LoadRegister(Register::function_closure());
 
-  Variable result(assembler, MachineRepresentation::kTagged);
-
   // Check if we can do a fast clone or have to call the runtime.
-  Label end(assembler), if_fast_clone(assembler),
+  Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
   Node* fast_clone_properties_count =
       __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
@@ -1466,11 +1465,11 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   __ Bind(&if_fast_clone);
   {
     // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
-    Node* clone = FastCloneShallowObjectStub::GenerateFastPath(
+    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
         assembler, &if_not_fast_clone, closure, literal_index,
         fast_clone_properties_count);
-    result.Bind(clone);
-    __ Goto(&end);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
 
   __ Bind(&if_not_fast_clone);
@@ -1486,14 +1485,12 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
         __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
     Node* flags = __ SmiTag(flags_raw);
 
-    result.Bind(__ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
-                               literal_index, constant_elements, flags));
-    __ Goto(&end);
+    Node* result =
+        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                       literal_index, constant_elements, flags);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
-
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
 }
 
 // CreateClosure <index> <tenured>
@@ -1521,9 +1518,8 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
 
-  Variable result(assembler, MachineRepresentation::kTagged);
-  Label end(assembler), if_duplicate_parameters(assembler, Label::kDeferred),
-      if_not_duplicate_parameters(assembler);
+  Label if_duplicate_parameters(assembler, Label::kDeferred);
+  Label if_not_duplicate_parameters(assembler);
 
   // Check if function has duplicate parameters.
   // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
@@ -1538,23 +1534,23 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
   __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
 
-  __ Bind(&if_duplicate_parameters);
-  {
-    result.Bind(
-        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure));
-    __ Goto(&end);
-  }
-
   __ Bind(&if_not_duplicate_parameters);
   {
+    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
     Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
     Node* target = __ HeapConstant(callable.code());
-    result.Bind(__ CallStub(callable.descriptor(), target, context, closure));
-    __ Goto(&end);
+    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
+  __ Bind(&if_duplicate_parameters);
+  {
+    Node* result =
+        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
-
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
 }
@@ -1562,6 +1558,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
 //
 // Creates a new unmapped arguments object.
 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
   Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* context = __ GetContext();
@@ -1575,6 +1572,7 @@ void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
 //
 // Creates a new rest parameter array.
 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
   Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* closure = __ LoadRegister(Register::function_closure());
@@ -1588,8 +1586,20 @@ void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
 //
 // Performs a stack guard check.
 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
-  __ StackCheck();
+  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+  Node* interrupt = __ StackCheckTriggeredInterrupt();
+  __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+  __ Bind(&ok);
   __ Dispatch();
+
+  __ Bind(&stack_check_interrupt);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kStackGuard, context);
+    __ Dispatch();
+  }
 }
 
 // Throw
@@ -1685,10 +1695,10 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
   Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
 
   // Check if we can use the for-in fast path potentially using the enum cache.
-  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
   Node* condition = __ WordEqual(receiver_map, cache_type);
-  __ Branch(condition, &if_fast, &if_slow);
+  __ BranchIf(condition, &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -1724,21 +1734,20 @@ void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   Node* cache_length = __ LoadRegister(cache_length_reg);
 
   // Check if {index} is at {cache_length} already.
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
-  Node* condition = __ WordEqual(index, cache_length);
-  __ Branch(condition, &if_true, &if_false);
+  Label if_true(assembler), if_false(assembler), end(assembler);
+  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    Node* result = __ BooleanConstant(true);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
-    Node* result = __ BooleanConstant(false);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(false));
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
 
 // ForInStep <index>
...