Commit 40c78728 authored by rmcilroy, committed by Commit bot

[Interpreter] Make Fast-paths of StackCheck, Jump, Return, ForInNext not build a frame.

Tweaks the generated code for a number of bytecode handlers. In particular, uses
deferred labels (now that they exist) to ensure that fast-paths don't build frames
where possible. This improves the codegen for StackCheck, Jump, Return and ForInNext.
Also tweaks the codegen for CreateArguments, LogicalNot and ForInDone.

Seems to give a ~5-8% performance boost on Octane.

BUG=v8:4280
LOG=N

Review-Url: https://codereview.chromium.org/1973873004
Cr-Commit-Position: refs/heads/master@{#36236}
parent c9a83150
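Note on the technique: a bytecode handler's slow path (anything that calls into the runtime) forces the generated handler to build a stack frame. By routing the slow path through a label marked `Label::kDeferred`, its code is scheduled out of line, so the common path can be emitted as frameless leaf code. Below is a minimal standalone C++ sketch of the same idea, not part of this commit: the names are hypothetical, and GCC/Clang attributes stand in for what `Label::kDeferred` does inside the CodeStubAssembler.

// --- Editor's sketch (hypothetical, not V8 code) ---
#include <cstdint>

static void HandleInterrupt() {}  // stand-in for the Runtime::kInterrupt call

// Deferred slow path: the call that forces frame construction lives out of
// line, marked cold/noinline so the compiler moves it off the hot path.
[[gnu::cold]] [[gnu::noinline]]
static int32_t InterruptAndResetBudget(int32_t reset_value) {
  HandleInterrupt();
  return reset_value;
}

// Hot path: an add and a compare only. With the runtime call pushed out of
// line, this can compile to frameless leaf code -- the analogue of what the
// deferred labels buy the bytecode handlers in this CL.
int32_t UpdateBudget(int32_t budget, int32_t weight, int32_t reset_value) {
  int32_t new_budget = budget + weight;
  if (__builtin_expect(new_budget < 0, 0)) {  // interrupt check, rarely true
    new_budget = InterruptAndResetBudget(reset_value);
  }
  return new_budget;
}

The DoStackCheck change in the diff has the same shape: test the limit, Dispatch() on the hot path, and only the deferred stack_check_interrupt block pays for the runtime call.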
@@ -56,6 +56,7 @@ class Schedule;
   V(IntPtrGreaterThanOrEqual)  \
   V(IntPtrEqual)               \
   V(Uint32LessThan)            \
+  V(UintPtrLessThan)           \
   V(UintPtrGreaterThanOrEqual) \
   V(WordEqual)                 \
   V(WordNotEqual)              \
@@ -460,33 +460,32 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
 }
 
 void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label interrupt_check(this);
-  CodeStubAssembler::Label end(this);
+  Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
   Node* budget_offset =
       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
 
   // Update budget by |weight| and check if it reaches zero.
+  Variable new_budget(this, MachineRepresentation::kWord32);
   Node* old_budget =
       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
-  Node* new_budget = Int32Add(old_budget, weight);
-  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+  new_budget.Bind(Int32Add(old_budget, weight));
+  Node* condition =
+      Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
   Branch(condition, &ok, &interrupt_check);
 
   // Perform interrupt and reset budget.
   Bind(&interrupt_check);
-  CallRuntime(Runtime::kInterrupt, GetContext());
-  StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset,
-                      Int32Constant(Interpreter::InterruptBudget()));
-  Goto(&end);
+  {
+    CallRuntime(Runtime::kInterrupt, GetContext());
+    new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
+    Goto(&ok);
+  }
 
   // Update budget.
   Bind(&ok);
   StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
-  Goto(&end);
-  Bind(&end);
+                      BytecodeArrayTaggedPointer(), budget_offset,
+                      new_budget.value());
 }
 
 Node* InterpreterAssembler::Advance(int delta) {
@@ -503,10 +502,9 @@ Node* InterpreterAssembler::Jump(Node* delta) {
 }
 
 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
+  Label match(this), no_match(this);
 
-  Branch(condition, &match, &no_match);
+  BranchIf(condition, &match, &no_match);
   Bind(&match);
   Jump(delta);
   Bind(&no_match);
@@ -618,23 +616,12 @@ compiler::Node* InterpreterAssembler::InterpreterReturn() {
   return DispatchToBytecodeHandler(exit_trampoline_code_object);
 }
 
-void InterpreterAssembler::StackCheck() {
-  CodeStubAssembler::Label end(this);
-  CodeStubAssembler::Label ok(this);
-  CodeStubAssembler::Label stack_guard(this);
-
+Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
   Node* sp = LoadStackPointer();
   Node* stack_limit = Load(
       MachineType::Pointer(),
       ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
-  Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
-  Branch(condition, &ok, &stack_guard);
-  Bind(&stack_guard);
-  CallRuntime(Runtime::kStackGuard, GetContext());
-  Goto(&end);
-  Bind(&ok);
-  Goto(&end);
-  Bind(&end);
+  return UintPtrLessThan(sp, stack_limit);
 }
 
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -646,18 +633,14 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
 
 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                                BailoutReason bailout_reason) {
-  CodeStubAssembler::Label match(this);
-  CodeStubAssembler::Label no_match(this);
-  CodeStubAssembler::Label end(this);
+  Label ok(this), abort(this, Label::kDeferred);
+  BranchIfWordEqual(lhs, rhs, &ok, &abort);
 
-  Node* condition = WordEqual(lhs, rhs);
-  Branch(condition, &match, &no_match);
-  Bind(&no_match);
+  Bind(&abort);
   Abort(bailout_reason);
-  Goto(&end);
-  Bind(&match);
-  Goto(&end);
-  Bind(&end);
+  Goto(&ok);
+
+  Bind(&ok);
 }
 
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -677,21 +660,21 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
   Node* old_counter =
       Load(MachineType::IntPtr(), counters_table, counter_offset);
 
-  CodeStubAssembler::Label counter_ok(this);
-  CodeStubAssembler::Label counter_saturated(this);
-  CodeStubAssembler::Label end(this);
+  Label counter_ok(this), counter_saturated(this, Label::kDeferred);
 
   Node* counter_reached_max = WordEqual(
       old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
-  Branch(counter_reached_max, &counter_saturated, &counter_ok);
+  BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+
   Bind(&counter_ok);
-  Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
-                      counter_offset, new_counter);
-  Goto(&end);
+  {
+    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
+                        counter_offset, new_counter);
+    Goto(&counter_saturated);
+  }
+
   Bind(&counter_saturated);
-  Goto(&end);
-  Bind(&end);
 }
 
 // static
@@ -114,11 +114,6 @@ class InterpreterAssembler : public CodeStubAssembler {
   // Jump relative to the current bytecode by |jump_offset|.
   compiler::Node* Jump(compiler::Node* jump_offset);
 
-  // Jump relative to the current bytecode by |jump_offset| if the
-  // |condition| is true. Helper function for JumpIfWordEqual and
-  // JumpIfWordNotEqual.
-  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
-
   // Jump relative to the current bytecode by |jump_offset| if the
   // word values |lhs| and |rhs| are equal.
   void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -129,8 +124,8 @@ class InterpreterAssembler : public CodeStubAssembler {
   void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           compiler::Node* jump_offset);
 
-  // Perform a stack guard check.
-  void StackCheck();
+  // Returns true if the stack guard check triggers an interrupt.
+  compiler::Node* StackCheckTriggeredInterrupt();
 
   // Returns from the function.
   compiler::Node* InterpreterReturn();
@@ -207,6 +202,11 @@ class InterpreterAssembler : public CodeStubAssembler {
   compiler::Node* BytecodeUnsignedOperand(int operand_index,
                                           OperandSize operand_size);
 
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // |condition| is true. Helper function for JumpIfWordEqual and
+  // JumpIfWordNotEqual.
+  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
   // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
   // update BytecodeOffset() itself.
   compiler::Node* Advance(int delta);
@@ -862,21 +862,22 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* context = __ GetContext();
   Node* to_boolean_value =
       __ CallStub(callable.descriptor(), target, context, accumulator);
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
-  Node* condition = __ WordEqual(to_boolean_value, true_value);
-  __ Branch(condition, &if_true, &if_false);
+  __ BranchIfWordEqual(to_boolean_value, true_value, &if_true, &if_false);
   __ Bind(&if_true);
   {
     __ SetAccumulator(false_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
     __ SetAccumulator(true_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
 
 // TypeOf
@@ -1453,10 +1454,8 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
   Node* closure = __ LoadRegister(Register::function_closure());
 
-  Variable result(assembler, MachineRepresentation::kTagged);
-
   // Check if we can do a fast clone or have to call the runtime.
-  Label end(assembler), if_fast_clone(assembler),
+  Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
   Node* fast_clone_properties_count =
       __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
@@ -1466,11 +1465,11 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
 
   __ Bind(&if_fast_clone);
   {
     // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
-    Node* clone = FastCloneShallowObjectStub::GenerateFastPath(
+    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
         assembler, &if_not_fast_clone, closure, literal_index,
         fast_clone_properties_count);
-    result.Bind(clone);
-    __ Goto(&end);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
   __ Bind(&if_not_fast_clone);
@@ -1486,14 +1485,12 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
         __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
     Node* flags = __ SmiTag(flags_raw);
 
-    result.Bind(__ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
-                               literal_index, constant_elements, flags));
-    __ Goto(&end);
+    Node* result =
+        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                       literal_index, constant_elements, flags);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
-
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
 }
 
 // CreateClosure <index> <tenured>
@@ -1521,9 +1518,8 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
-  Variable result(assembler, MachineRepresentation::kTagged);
 
-  Label end(assembler), if_duplicate_parameters(assembler, Label::kDeferred),
-      if_not_duplicate_parameters(assembler);
+  Label if_duplicate_parameters(assembler, Label::kDeferred);
+  Label if_not_duplicate_parameters(assembler);
 
   // Check if function has duplicate parameters.
   // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
@@ -1538,23 +1534,23 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
   __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
 
-  __ Bind(&if_duplicate_parameters);
-  {
-    result.Bind(
-        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure));
-    __ Goto(&end);
-  }
-
   __ Bind(&if_not_duplicate_parameters);
   {
     // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
     Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
     Node* target = __ HeapConstant(callable.code());
-    result.Bind(__ CallStub(callable.descriptor(), target, context, closure));
-    __ Goto(&end);
+    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
 
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
+  __ Bind(&if_duplicate_parameters);
+  {
+    Node* result =
+        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
 }
@@ -1562,6 +1558,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
 //
 // Creates a new unmapped arguments object.
 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
   Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* context = __ GetContext();
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
// TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
Node* target = __ HeapConstant(callable.code());
Node* closure = __ LoadRegister(Register::function_closure());
@@ -1588,8 +1586,20 @@ void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
 //
 // Performs a stack guard check.
 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
-  __ StackCheck();
+  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+  Node* interrupt = __ StackCheckTriggeredInterrupt();
+  __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+  __ Bind(&ok);
   __ Dispatch();
+
+  __ Bind(&stack_check_interrupt);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kStackGuard, context);
+    __ Dispatch();
+  }
 }
 
 // Throw
@@ -1685,10 +1695,10 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
   Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
 
   // Check if we can use the for-in fast path potentially using the enum cache.
-  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
   Node* condition = __ WordEqual(receiver_map, cache_type);
-  __ Branch(condition, &if_fast, &if_slow);
+  __ BranchIf(condition, &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -1724,21 +1734,20 @@ void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   Node* cache_length = __ LoadRegister(cache_length_reg);
 
   // Check if {index} is at {cache_length} already.
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
-  Node* condition = __ WordEqual(index, cache_length);
-  __ Branch(condition, &if_true, &if_false);
+  Label if_true(assembler), if_false(assembler), end(assembler);
+  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    Node* result = __ BooleanConstant(true);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
-    Node* result = __ BooleanConstant(false);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(false));
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
 
 // ForInStep <index>