Commit 51a15140 authored by Ross McIlroy, committed by Commit Bot

[Interpreter] Adapt Call bytecode handlers to drop their stack-frame.

This change adapts the Call bytecode handlers such that they don't require
a stack frame. It does this by modifying the call bytecode handler to
tail-call the Call or InterpreterPushArgsAndCall builtins. As a result, the
callee function returns to the InterpreterEntryTrampoline (since this is the
return address on the interpreter frame), which is adapted to dispatch to the
next bytecode handler. The return bytecode handler is modified to tail-call a
new InterpreterExitTrampoline instead of returning to the
InterpreterEntryTrampoline.

Overall this significantly reduces the amount of stack space required for
interpreter frames, increasing the maximum depth of recursive calls from
around 6,000 to around 12,500 on x64.

BUG=chromium:753705

Change-Id: I23328e4cef878df3aca4db763b47d72a2cce664c
Reviewed-on: https://chromium-review.googlesource.com/634364
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47617}
parent c67cb265
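
The call-and-dispatch flow described in the commit message can be pictured with a
small, self-contained C++ analogy. This is a conceptual sketch only, not V8 code:
the names (EntryTrampoline, RunFunction, the two-bytecode program) are made up for
illustration. The point is that handlers no longer keep their own frame; every
call "returns" into a single dispatch loop that owns the only frame, advances the
bytecode offset, and dispatches again, while the return handler leaves through a
separate exit path.

// Conceptual sketch only (not V8 code): the dispatch loop stands in for
// InterpreterEntryTrampoline, which owns the only interpreter frame. A call
// handler keeps no frame of its own; when the callee returns, control lands
// back in the loop, which advances the offset and dispatches the next handler.
// The return handler leaves through a separate exit path, standing in for the
// new InterpreterExitTrampoline.
#include <cstdio>
#include <vector>

enum Bytecode { kCall, kReturn };

int RunFunction(const std::vector<Bytecode>& bytecodes);

int EntryTrampoline(const std::vector<Bytecode>& bytecodes) {
  int accumulator = 0;
  for (size_t offset = 0; /* exit only via kReturn */;) {
    switch (bytecodes[offset]) {
      case kCall:
        // The handler "tail-calls" the callee; no extra interpreter frame is
        // kept while the callee runs.
        accumulator = RunFunction({kReturn});
        break;
      case kReturn:
        // Exit trampoline: tear down the single frame and return the result.
        return accumulator;
    }
    ++offset;  // AdvanceBytecodeOffset, then dispatch the next handler.
  }
}

int RunFunction(const std::vector<Bytecode>& bytecodes) {
  return EntryTrampoline(bytecodes);
}

int main() {
  std::printf("result = %d\n", RunFunction({kCall, kReturn}));
  return 0;
}
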
......@@ -312,6 +312,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -334,6 +334,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -55,6 +55,7 @@
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/ostreams.h"
......@@ -823,6 +824,12 @@ ExternalReference ExternalReference::interpreter_dispatch_counters(
isolate->interpreter()->bytecode_dispatch_counters_table());
}
ExternalReference ExternalReference::bytecode_size_table_address(
Isolate* isolate) {
return ExternalReference(
interpreter::Bytecodes::bytecode_size_table_address());
}
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
......
......@@ -836,6 +836,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
static ExternalReference interpreter_dispatch_counters(Isolate* isolate);
static ExternalReference bytecode_size_table_address(Isolate* isolate);
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
......
......@@ -1129,6 +1129,52 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ mov(
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmp(bytecode, Operand(0x1));
__ b(hi, &load_size);
__ b(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
__ add(bytecode_offset, bytecode_offset, Operand(1));
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ add(bytecode_offset, bytecode_offset, Operand(1));
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
__ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ add(bytecode_offset, bytecode_offset, scratch1);
}
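
For reference, the assembly above encodes the following lookup: if the current
bytecode is a Wide or ExtraWide prefix, skip the prefix byte and switch to the
double- or quadruple-scale row of the size table before adding the bytecode's
size to the offset. A plain-C++ restatement follows; it is a sketch, not the V8
implementation, assuming the size table is laid out as three consecutive rows of
kBytecodeCount ints (matching the new Bytecodes::kBytecodeSizes layout in this
change), and the count shown is only a placeholder.

// Sketch of the logic generated above, not the V8 implementation itself.
// Assumes the size table is three consecutive rows of kBytecodeCount ints:
// single-, double-, and quadruple-scale sizes, as in this change.
#include <cstdint>

constexpr int kWideBytecode = 0;       // matches the STATIC_ASSERTs above
constexpr int kExtraWideBytecode = 1;
constexpr int kBytecodeCount = 187;    // placeholder; the real value comes from BYTECODE_LIST

int AdvanceBytecodeOffset(const uint8_t* bytecode_array, int bytecode_offset,
                          const int* bytecode_size_table) {
  int bytecode = bytecode_array[bytecode_offset];
  if (bytecode == kWideBytecode) {
    // Skip the prefix and use the double-scale row of the size table.
    bytecode = bytecode_array[++bytecode_offset];
    bytecode_size_table += kBytecodeCount;
  } else if (bytecode == kExtraWideBytecode) {
    // Skip the prefix and use the quadruple-scale row of the size table.
    bytecode = bytecode_array[++bytecode_offset];
    bytecode_size_table += 2 * kBytecodeCount;
  }
  // Advance past the (possibly scaled) bytecode and its operands.
  return bytecode_offset + bytecode_size_table[bytecode];
}
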
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -1241,13 +1287,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(r9, Operand::Zero());
__ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
// Load accumulator and dispatch table into registers.
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(r4, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
......@@ -1255,9 +1304,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(r4);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r1, r2);
__ jmp(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -1271,6 +1328,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
......@@ -1455,19 +1518,19 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ ldr(r1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister, r1, r2);
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ mov(r2, r0); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r1, r2);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
__ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
......
......@@ -1140,6 +1140,52 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ Mov(
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Cmp(bytecode, Operand(0x1));
__ B(hi, &load_size);
__ B(eq, &extra_wide);
// Load the next bytecode and update table to the wide scaled table.
__ Add(bytecode_offset, bytecode_offset, Operand(1));
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ B(&load_size);
__ Bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ Add(bytecode_offset, bytecode_offset, Operand(1));
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ Add(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ B(&load_size);
// Load the size of the current bytecode.
__ Bind(&load_size);
__ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
__ Add(bytecode_offset, bytecode_offset, scratch1);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -1253,13 +1299,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x3, MemOperand(fp, x10, LSL, kPointerSizeLog2));
__ Bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and dispatch table into registers.
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
......@@ -1267,9 +1316,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(ip0);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
// Get bytecode array and bytecode offset from the stack frame.
__ Ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, x1, x2);
__ B(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -1283,6 +1340,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
......@@ -1472,19 +1535,19 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ Ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Ldr(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister, x1, x2);
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ Mov(x2, x0); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ ldr(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, x1, x2);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
__ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
......
......@@ -109,6 +109,7 @@ namespace internal {
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterExitTrampoline) \
ASM(InterpreterPushArgsThenCall) \
ASM(InterpreterPushUndefinedAndArgsThenCall) \
ASM(InterpreterPushArgsThenCallFunction) \
......
......@@ -775,6 +775,51 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ Move(bytecode_size_table,
Immediate(
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
__ j(above, &load_size, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
__ inc(bytecode_offset);
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ inc(bytecode_offset);
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ add(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
__ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -884,15 +929,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(Operand(ebp, eax, times_pointer_size, 0), edx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator, bytecode offset and dispatch table into registers.
// Load accumulator and bytecode offset into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ mov(kInterpreterDispatchTableRegister,
Immediate(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
......@@ -900,9 +948,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ call(ebx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in eax.
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
// Any returns to the entry trampoline are due to the interpreter tail calling
// a builtin and then a dispatch, so advance the bytecode offset here and
// dispatch to the next handler.
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx);
__ jmp(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -920,6 +980,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in eax.
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow,
......@@ -1232,22 +1298,21 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister);
__ Push(ebx); // First argument is the bytecode array.
__ Push(edx); // Second argument is the bytecode offset.
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ Move(edx, eax); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ mov(ebx, kInterpreterBytecodeOffsetRegister);
__ SmiTag(ebx);
__ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
Generate_InterpreterEnterBytecode(masm);
}
......
......@@ -1105,6 +1105,55 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2, Register scratch3) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ li(
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Branch(&load_size, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
__ Addu(bytecode_offset, bytecode_offset, Operand(1));
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ Addu(bytecode_offset, bytecode_offset, Operand(1));
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
__ Lsa(scratch3, bytecode_size_table, bytecode, 2);
__ lw(scratch3, MemOperand(scratch3));
__ Addu(bytecode_offset, bytecode_offset, scratch3);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -1220,13 +1269,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(a3, MemOperand(t1));
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and dispatch table into registers.
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
......@@ -1235,9 +1287,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
// Get bytecode array and bytecode offset from the stack frame.
__ lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
__ jmp(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -1252,6 +1312,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
......@@ -1439,16 +1505,18 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ lw(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister, a1, a2);
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ mov(a2, v0); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
__ lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lw(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
......@@ -1742,7 +1810,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ addu(v0, v0, a1);
__ Addu(v0, v0, a1);
__ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
// And "return" to the OSR entry point of the function.
......
......@@ -1109,6 +1109,54 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2, Register scratch3) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ li(
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ Branch(&load_size, hi, bytecode, Operand(1));
__ Branch(&extra_wide, eq, bytecode, Operand(1));
// Load the next bytecode and update table to the wide scaled table.
__ Daddu(bytecode_offset, bytecode_offset, Operand(1));
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ Daddu(bytecode_offset, bytecode_offset, Operand(1));
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
__ Dlsa(scratch3, bytecode_size_table, bytecode, 2);
__ Lw(scratch3, MemOperand(scratch3));
__ Daddu(bytecode_offset, bytecode_offset, scratch3);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -1224,13 +1272,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sd(a3, MemOperand(a5));
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and dispatch table into registers.
// Load accumulator as undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a0, MemOperand(a0));
......@@ -1239,9 +1290,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
// Get bytecode array and bytecode offset from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Ld(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
__ jmp(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -1256,6 +1315,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
......@@ -1444,16 +1509,18 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ Ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister, a1, a2);
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ mov(a2, v0); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ Ld(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
__ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
......@@ -1749,7 +1816,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ daddu(v0, v0, a1);
__ Daddu(v0, v0, a1);
__ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
// And "return" to the OSR entry point of the function.
......
......@@ -853,6 +853,50 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ bind(&fallthrough);
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address(masm->isolate()));
// Load the current bytecode.
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpb(bytecode, Immediate(0x1));
__ j(above, &load_size, Label::kNear);
__ j(equal, &extra_wide, Label::kNear);
// Load the next bytecode and update table to the wide scaled table.
__ incl(bytecode_offset);
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size, Label::kNear);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ incl(bytecode_offset);
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
__ addp(bytecode_size_table,
Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size, Label::kNear);
// Load the size of the current bytecode.
__ bind(&load_size);
__ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
......@@ -967,13 +1011,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(Operand(rbp, rax, times_pointer_size, 0), rdx);
__ bind(&no_incoming_new_target_or_generator_register);
// Load accumulator and dispatch table into registers.
// Load accumulator with undefined.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
// Load the dispatch table into a register and dispatch to the bytecode
// handler at the current bytecode offset.
Label do_dispatch;
__ bind(&do_dispatch);
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Dispatch to the first bytecode handler for the function.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
......@@ -981,9 +1028,22 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ call(rbx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
// Any returns to the entry trampoline are due to the interpreter tail calling
// a builtin and then a dispatch, so advance the bytecode offset here and
// dispatch to the next handler.
// Get bytecode array and bytecode offset from the stack frame.
__ movp(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx);
__ jmp(&do_dispatch);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
......@@ -999,6 +1059,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
}
static void Generate_StackOverflowCheck(
MacroAssembler* masm, Register num_args, Register scratch,
Label* stack_overflow,
......@@ -1209,22 +1275,21 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
__ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(rdx, Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister);
__ Push(rbx); // First argument is the bytecode array.
__ Push(rdx); // Second argument is the bytecode offset.
__ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
__ Move(rdx, rax); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rdx);
// Get bytecode array and bytecode offset from the stack frame.
__ movp(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
__ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
Generate_InterpreterEnterBytecode(masm);
}
......
......@@ -361,6 +361,12 @@ Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
ConstructForwardVarargsDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterExitTrampoline(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, InterpreterExitTrampoline),
InterpreterExitTrampolineDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterPushArgsThenCall(
Isolate* isolate, ConvertReceiverMode receiver_mode,
......
......@@ -92,6 +92,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ConstructForwardVarargs(Isolate* isolate);
static Callable ConstructFunctionForwardVarargs(Isolate* isolate);
static Callable InterpreterExitTrampoline(Isolate* isolate);
static Callable InterpreterPushArgsThenCall(Isolate* isolate,
ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode);
......
......@@ -724,6 +724,33 @@ Node* CodeAssembler::TailCallStubImpl(const CallInterfaceDescriptor& descriptor,
REPEAT_1_TO_12(INSTANTIATE, Node*)
#undef INSTANTIATE
template <class... TArgs>
Node* CodeAssembler::TailCallStubThenBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, Node* context,
TArgs... args) {
DCHECK_LE(descriptor.GetParameterCount(), sizeof...(args));
// Extra arguments not mentioned in the descriptor are passed on the stack.
int stack_parameter_count =
sizeof...(args) - descriptor.GetRegisterParameterCount();
DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
MachineType::AnyTagged(), 0);
Node* nodes[] = {target, args..., context};
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
// Instantiate TailCallJSAndBytecodeDispatch() for argument counts used by
// CSA-generated code
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* \
CodeAssembler::TailCallStubThenBytecodeDispatch( \
const CallInterfaceDescriptor&, Node*, Node*, Node*, __VA_ARGS__);
REPEAT_1_TO_7(INSTANTIATE, Node*)
#undef INSTANTIATE
template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
......
......@@ -729,6 +729,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* target, TArgs... args);
template <class... TArgs>
Node* TailCallStubThenBytecodeDispatch(
const CallInterfaceDescriptor& descriptor, Node* context, Node* target,
TArgs... args);
template <class... TArgs>
Node* CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, TArgs... args) {
......
......@@ -106,6 +106,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
Add(ExternalReference::bytecode_size_table_address(isolate).address(),
"Bytecodes::bytecode_size_table_address");
Add(ExternalReference::address_of_negative_infinity().address(),
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
......
......@@ -307,6 +307,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -611,6 +611,14 @@ void ApiCallbackDescriptor::InitializePlatformIndependent(
machine_types);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator
MachineType machine_types[] = {MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void InterpreterDispatchDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
......
......@@ -16,73 +16,74 @@ namespace internal {
class PlatformInterfaceDescriptor;
#define INTERFACE_DESCRIPTOR_LIST(V) \
V(Void) \
V(ContextOnly) \
V(Load) \
V(LoadWithVector) \
V(LoadField) \
V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
V(StoreWithVector) \
V(StoreNamedTransition) \
V(StoreTransition) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FastNewArguments) \
V(RecordWrite) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
V(Typeof) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CallFunction) \
V(CallVarargs) \
V(CallForwardVarargs) \
V(CallWithSpread) \
V(CallWithArrayLike) \
V(CallTrampoline) \
V(ConstructStub) \
V(ConstructVarargs) \
V(ConstructForwardVarargs) \
V(ConstructWithSpread) \
V(ConstructWithArrayLike) \
V(ConstructTrampoline) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(Builtin) \
V(ArrayConstructor) \
V(IteratingArrayBuiltin) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
V(Compare) \
V(BinaryOp) \
V(StringAdd) \
V(StringCharAt) \
V(StringCharCodeAt) \
V(StringCompare) \
V(SubString) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
V(ApiCallback) \
V(ApiGetter) \
V(MathPowTagged) \
V(MathPowInteger) \
V(GrowArrayElements) \
V(NewArgumentsElements) \
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(InterpreterCEntry) \
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
#define INTERFACE_DESCRIPTOR_LIST(V) \
V(Void) \
V(ContextOnly) \
V(Load) \
V(LoadWithVector) \
V(LoadField) \
V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
V(StoreWithVector) \
V(StoreNamedTransition) \
V(StoreTransition) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FastNewArguments) \
V(RecordWrite) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
V(Typeof) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CallFunction) \
V(CallVarargs) \
V(CallForwardVarargs) \
V(CallWithSpread) \
V(CallWithArrayLike) \
V(CallTrampoline) \
V(ConstructStub) \
V(ConstructVarargs) \
V(ConstructForwardVarargs) \
V(ConstructWithSpread) \
V(ConstructWithArrayLike) \
V(ConstructTrampoline) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(Builtin) \
V(ArrayConstructor) \
V(IteratingArrayBuiltin) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
V(Compare) \
V(BinaryOp) \
V(StringAdd) \
V(StringCharAt) \
V(StringCharCodeAt) \
V(StringCompare) \
V(SubString) \
V(ForInPrepare) \
V(GetProperty) \
V(ArgumentAdaptor) \
V(ApiCallback) \
V(ApiGetter) \
V(MathPowTagged) \
V(MathPowInteger) \
V(GrowArrayElements) \
V(NewArgumentsElements) \
V(InterpreterExitTrampoline) \
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
V(InterpreterCEntry) \
V(ResumeGenerator) \
V(FrameDropperTrampoline) \
V(WasmRuntimeCall) \
BUILTIN_LIST_TFS(V)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
......@@ -839,6 +840,14 @@ class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
class V8_EXPORT_PRIVATE InterpreterExitTrampolineDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kAccumulator)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
InterpreterExitTrampolineDescriptor, CallInterfaceDescriptor)
};
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
......
......@@ -135,6 +135,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
class BytecodeOperands {
public:
// The total number of operand types used.
static const int kOperandTypeCount = static_cast<int>(OperandType::kLast) + 1;
// Returns true if |accumulator_use| reads the accumulator.
static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
return accumulator_use == AccumulatorUse::kRead ||
......
......@@ -38,34 +38,59 @@ const AccumulatorUse Bytecodes::kAccumulatorUse[] = {
#undef ENTRY
};
const int Bytecodes::kBytecodeSizes[][3] = {
#define ENTRY(Name, ...) \
{ BytecodeTraits<__VA_ARGS__>::kSingleScaleSize, \
BytecodeTraits<__VA_ARGS__>::kDoubleScaleSize, \
BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleSize },
const int Bytecodes::kBytecodeSizes[3][kBytecodeCount] = {
{
#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kSingleScaleSize,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kDoubleScaleSize,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleSize,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}
};
const OperandSize* const Bytecodes::kOperandSizes[][3] = {
#define ENTRY(Name, ...) \
{ BytecodeTraits<__VA_ARGS__>::kSingleScaleOperandSizes, \
BytecodeTraits<__VA_ARGS__>::kDoubleScaleOperandSizes, \
BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleOperandSizes },
const OperandSize* const Bytecodes::kOperandSizes[3][kBytecodeCount] = {
{
#define ENTRY(Name, ...) \
BytecodeTraits<__VA_ARGS__>::kSingleScaleOperandSizes,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) \
BytecodeTraits<__VA_ARGS__>::kDoubleScaleOperandSizes,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) \
BytecodeTraits<__VA_ARGS__>::kQuadrupleScaleOperandSizes,
BYTECODE_LIST(ENTRY)
#undef ENTRY
}
};
const OperandSize Bytecodes::kOperandKindSizes[][3] = {
#define ENTRY(Name, ...) \
{ OperandScaler<OperandType::k##Name, \
OperandScale::kSingle>::kOperandSize, \
OperandScaler<OperandType::k##Name, \
OperandScale::kDouble>::kOperandSize, \
OperandScaler<OperandType::k##Name, \
OperandScale::kQuadruple>::kOperandSize },
OPERAND_TYPE_LIST(ENTRY)
const OperandSize
Bytecodes::kOperandKindSizes[3][BytecodeOperands::kOperandTypeCount] = {
{
#define ENTRY(Name, ...) \
OperandScaler<OperandType::k##Name, OperandScale::kSingle>::kOperandSize,
OPERAND_TYPE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) \
OperandScaler<OperandType::k##Name, OperandScale::kDouble>::kOperandSize,
OPERAND_TYPE_LIST(ENTRY)
#undef ENTRY
}, {
#define ENTRY(Name, ...) \
OperandScaler<OperandType::k##Name, OperandScale::kQuadruple>::kOperandSize,
OPERAND_TYPE_LIST(ENTRY)
#undef ENTRY
}
};
// clang-format on
......
......@@ -441,6 +441,9 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// The maximum number of operands a bytecode may have.
static const int kMaxOperands = 5;
// The total number of bytecodes used.
static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
......@@ -728,7 +731,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
OperandScale::kLast == OperandScale::kQuadruple);
int scale_index = static_cast<int>(operand_scale) >> 1;
return kOperandSizes[static_cast<size_t>(bytecode)][scale_index];
return kOperandSizes[scale_index][static_cast<size_t>(bytecode)];
}
// Returns the offset of the i-th operand of |bytecode| relative to the start
......@@ -743,7 +746,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
OperandScale::kLast == OperandScale::kQuadruple);
int scale_index = static_cast<int>(operand_scale) >> 1;
return kBytecodeSizes[static_cast<size_t>(bytecode)][scale_index];
return kBytecodeSizes[scale_index][static_cast<size_t>(bytecode)];
}
// Returns a debug break bytecode to replace |bytecode|.
......@@ -831,7 +834,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
OperandScale::kLast == OperandScale::kQuadruple);
int scale_index = static_cast<int>(operand_scale) >> 1;
return kOperandKindSizes[static_cast<size_t>(operand_type)][scale_index];
return kOperandKindSizes[scale_index][static_cast<size_t>(operand_type)];
}
// Returns true if |operand_type| is a runtime-id operand (kRuntimeId).
......@@ -879,6 +882,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
}
}
static Address bytecode_size_table_address() {
return reinterpret_cast<Address>(const_cast<int*>(&kBytecodeSizes[0][0]));
}
private:
static const OperandType* const kOperandTypes[];
static const OperandTypeInfo* const kOperandTypeInfos[];
......@@ -886,9 +893,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const int kNumberOfRegisterOperands[];
static const AccumulatorUse kAccumulatorUse[];
static const bool kIsScalable[];
static const int kBytecodeSizes[][3];
static const OperandSize* const kOperandSizes[][3];
static OperandSize const kOperandKindSizes[][3];
static const int kBytecodeSizes[3][kBytecodeCount];
static const OperandSize* const kOperandSizes[3][kBytecodeCount];
static OperandSize const
kOperandKindSizes[3][BytecodeOperands::kOperandTypeCount];
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
......
......@@ -660,9 +660,9 @@ void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
BIND(&done);
}
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
ConvertReceiverMode receiver_mode) {
void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* first_arg, Node* arg_count,
ConvertReceiverMode receiver_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
......@@ -671,14 +671,55 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
Node* first_arg, Node* arg_count,
Node* slot_id,
Node* feedback_vector) {
template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
Node* arg_count,
ConvertReceiverMode receiver_mode,
TArgs... args) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::Call(isolate());
Node* code_target = HeapConstant(callable.code());
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
TailCallStubThenBytecodeDispatch(
callable.descriptor(), code_target, context, function, arg_count,
static_cast<Node*>(UndefinedConstant()), args...);
} else {
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
context, function, arg_count, args...);
}
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
// Instantiate CallJSAndDispatch() for argument counts used by interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, Node*, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
void InterpreterAssembler::CallJSWithSpreadAndDispatch(
Node* function, Node* context, Node* first_arg, Node* arg_count,
Node* slot_id, Node* feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
CollectCallFeedback(function, context, feedback_vector, slot_id);
......@@ -688,8 +729,10 @@ Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
InterpreterPushArgsMode::kWithFinalSpread);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
arg_count, first_arg, function);
// TailCallStubThenDispatch updates accumulator with result.
accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}
Node* InterpreterAssembler::Construct(Node* target, Node* context,
......
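A single-file sketch (hypothetical names, not the V8 API) of the two C++ mechanics used above: prepending an implicit undefined receiver before forwarding the variadic operands, and explicitly instantiating the template in the .cc file for just the argument counts the bytecode handlers need.

#include <cstdio>

template <class... TArgs>
void TailCallStub(TArgs... args) {
  std::printf("stub called with %d arguments\n",
              static_cast<int>(sizeof...(args)));
}

template <class... TArgs>
void CallAndDispatch(bool receiver_is_undefined, TArgs... args) {
  if (receiver_is_undefined) {
    // The receiver is not an explicit operand, so pass undefined ourselves.
    TailCallStub(/*undefined receiver*/ 0, args...);
  } else {
    TailCallStub(args...);
  }
}

// Explicit instantiations for the fixed arities callers use; the variadic
// definition can then stay out of the header.
template void CallAndDispatch(bool);
template void CallAndDispatch(bool, int);
template void CallAndDispatch(bool, int, int);
template void CallAndDispatch(bool, int, int, int);

int main() {
  CallAndDispatch(true, 1, 2);   // 3 stub arguments (receiver prepended)
  CallAndDispatch(false, 1, 2);  // 2 stub arguments
  return 0;
}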
......@@ -127,15 +127,25 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) and the first argument located at |first_arg|, possibly
// including the receiver depending on |receiver_mode|.
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
// including the receiver depending on |receiver_mode|. After the call returns
// directly dispatches to the next bytecode.
void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
ConvertReceiverMode receiver_mode);
// Call JSFunction or Callable |function| with |arg_count| arguments (not
// including receiver) passed as |args|, possibly including the receiver
// depending on |receiver_mode|. After the call returns directly dispatches to
// the next bytecode.
template <class... TArgs>
void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
ConvertReceiverMode receiver_mode, TArgs... args);
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
// located at |first_arg|.
compiler::Node* CallJSWithSpread(compiler::Node* function,
// located at |first_arg|, and the final argument being spread. After the call
// returns directly dispatches to the next bytecode.
void CallJSWithSpreadAndDispatch(compiler::Node* function,
compiler::Node* context,
compiler::Node* first_arg,
compiler::Node* arg_count,
......
......@@ -1723,10 +1723,8 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
Node* result =
CallJS(function, context, first_arg, args_count, receiver_mode);
SetAccumulator(result);
Dispatch();
// Call the function and dispatch to the next handler.
CallJSAndDispatch(function, context, first_arg, args_count, receiver_mode);
}
// Generates code to perform a JS call with a known number of arguments that
......@@ -1736,14 +1734,9 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
const int kFirstArgumentOperandIndex = 1;
const int kReceiverOperandCount =
(receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1;
const int kRecieverAndArgOperandCount = kReceiverOperandCount + arg_count;
const int kSlotOperandIndex =
kFirstArgumentOperandIndex + kReceiverOperandCount + arg_count;
// Indices and counts of parameters to the call stub.
const int kBoilerplateParameterCount = 5;
const int kReceiverParameterIndex = 3;
const int kReceiverParameterCount = 1;
// Only used in a DCHECK.
USE(kReceiverParameterCount);
kFirstArgumentOperandIndex + kRecieverAndArgOperandCount;
Node* function_reg = BytecodeOperandReg(0);
Node* function = LoadRegister(function_reg);
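A tiny worked example (plain C++, illustrative only) of the operand-index arithmetic above: operand 0 holds the callee, the receiver operand is present only when it is not implicitly undefined, and the feedback slot operand follows the receiver and argument operands.

#include <cstdio>

int SlotOperandIndex(bool receiver_is_implicit_undefined, int arg_count) {
  const int kFirstArgumentOperandIndex = 1;  // operand 0 holds the callee
  const int kReceiverOperandCount = receiver_is_implicit_undefined ? 0 : 1;
  const int kReceiverAndArgOperandCount = kReceiverOperandCount + arg_count;
  return kFirstArgumentOperandIndex + kReceiverAndArgOperandCount;
}

int main() {
  // Two arguments, receiver implicitly undefined: the slot is operand 3.
  std::printf("%d\n", SlotOperandIndex(true, 2));
  // Two arguments, explicit receiver: the slot is operand 4.
  std::printf("%d\n", SlotOperandIndex(false, 2));
  return 0;
}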
......@@ -1754,35 +1747,32 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Collect the {function} feedback.
CollectCallFeedback(function, context, feedback_vector, slot_id);
std::array<Node*, Bytecodes::kMaxOperands + kBoilerplateParameterCount>
temp;
Callable callable = CodeFactory::Call(isolate());
temp[0] = HeapConstant(callable.code());
temp[1] = function;
temp[2] = Int32Constant(arg_count);
int parameter_index = kReceiverParameterIndex;
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
Node* undefined_value =
HeapConstant(isolate()->factory()->undefined_value());
temp[parameter_index++] = undefined_value;
}
// The bytecode argument operands are copied into the remaining argument
// parameters.
for (int i = 0; i < (kReceiverOperandCount + arg_count); ++i) {
Node* reg = BytecodeOperandReg(kFirstArgumentOperandIndex + i);
temp[parameter_index++] = LoadRegister(reg);
switch (kRecieverAndArgOperandCount) {
case 0:
CallJSAndDispatch(function, context, Int32Constant(arg_count),
receiver_mode);
break;
case 1:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)));
break;
case 2:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)));
break;
case 3:
CallJSAndDispatch(
function, context, Int32Constant(arg_count), receiver_mode,
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex)),
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 1)),
LoadRegister(BytecodeOperandReg(kFirstArgumentOperandIndex + 2)));
break;
default:
UNREACHABLE();
}
DCHECK_EQ(parameter_index,
kReceiverParameterIndex + kReceiverParameterCount + arg_count);
temp[parameter_index] = context;
Node* result = CallStubN(callable.descriptor(), 1,
arg_count + kBoilerplateParameterCount, &temp[0]);
SetAccumulator(result);
Dispatch();
}
};
......@@ -1902,10 +1892,8 @@ IGNITION_HANDLER(CallJSRuntime, InterpreterAssembler) {
Node* function = LoadContextElement(native_context, context_index);
// Call the function.
Node* result = CallJS(function, context, first_arg, args_count,
ConvertReceiverMode::kAny);
SetAccumulator(result);
Dispatch();
CallJSAndDispatch(function, context, first_arg, args_count,
ConvertReceiverMode::kAny);
}
// CallWithSpread <callable> <first_arg> <arg_count>
......@@ -1927,10 +1915,8 @@ IGNITION_HANDLER(CallWithSpread, InterpreterAssembler) {
Node* context = GetContext();
// Call into Runtime function CallWithSpread which does everything.
Node* result = CallJSWithSpread(callable, context, receiver_arg, args_count,
slot_id, feedback_vector);
SetAccumulator(result);
Dispatch();
CallJSWithSpreadAndDispatch(callable, context, receiver_arg, args_count,
slot_id, feedback_vector);
}
// ConstructWithSpread <first_arg> <arg_count>
......@@ -2976,8 +2962,10 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
// Return the value in the accumulator.
IGNITION_HANDLER(Return, InterpreterAssembler) {
UpdateInterruptBudgetOnReturn();
Callable exit_trampoline = CodeFactory::InterpreterExitTrampoline(isolate());
Node* context = GetContext();
Node* accumulator = GetAccumulator();
Return(accumulator);
TailCallStub(exit_trampoline, context, accumulator);
}
// ThrowReferenceErrorIfHole <variable_name>
......
......@@ -88,13 +88,18 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
#undef CASE
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
#define HANDLE_CASE(name, lower_case, expected_arg_count) \
__ BIND(&lower_case); \
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, arg_count); \
} \
result.Bind(name(first_arg_reg, arg_count, context)); \
__ Goto(&end);
#define HANDLE_CASE(name, lower_case, expected_arg_count) \
__ BIND(&lower_case); \
{ \
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, arg_count); \
} \
Node* value = name(first_arg_reg, arg_count, context); \
if (value) { \
result.Bind(value); \
__ Goto(&end); \
} \
}
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
......@@ -334,9 +339,9 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
__ BIND(&arg_count_positive);
}
Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
ConvertReceiverMode::kAny);
return result;
__ CallJSAndDispatch(function, context, receiver_arg, target_args_count,
ConvertReceiverMode::kAny);
return nullptr; // We never return from the CallJSAndDispatch above.
}
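A standalone sketch (host-side C++, not the CSA macro itself) of the convention introduced above: an intrinsic generator that tail-dispatches returns nullptr, and the generated case only binds a result and jumps to the end label when a value was actually produced.

#include <cstdio>

struct Node {};  // stand-in for a compiler graph node
Node some_value;

Node* GenerateValueProducingIntrinsic() { return &some_value; }
Node* GenerateDispatchingIntrinsic() { return nullptr; }  // exits via dispatch

void HandleCase(Node* (*generate)()) {
  Node* value = generate();
  if (value) {
    std::printf("bind result and goto end\n");
  } else {
    std::printf("no result to bind; the intrinsic already dispatched\n");
  }
}

int main() {
  HandleCase(GenerateValueProducingIntrinsic);
  HandleCase(GenerateDispatchingIntrinsic);
  return 0;
}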
Node* IntrinsicsGenerator::ClassOf(Node* args_reg, Node* arg_count,
......
......@@ -303,6 +303,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -303,6 +303,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -173,19 +173,5 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
#endif
RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
interpreter::BytecodeArrayIterator it(bytecode_array);
int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
while (it.current_offset() < offset) it.Advance();
DCHECK_EQ(offset, it.current_offset());
it.Advance(); // Advance by one bytecode.
offset = it.current_offset() + BytecodeArray::kHeaderSize - kHeapObjectTag;
return Smi::FromInt(offset);
}
} // namespace internal
} // namespace v8
......@@ -208,8 +208,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
F(InterpreterNewClosure, 4, 1) \
F(InterpreterAdvanceBytecodeOffset, 2, 1)
F(InterpreterNewClosure, 4, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
......
......@@ -313,6 +313,12 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
......@@ -616,27 +616,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
}
}
TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
if (Bytecodes::IsCallOrConstruct(bytecode) &&
bytecode != Bytecode::kCallWithSpread) {
InterpreterAssemblerTestState state(this, bytecode);
InterpreterAssemblerForTest m(&state, bytecode);
ConvertReceiverMode receiver_mode = Bytecodes::GetReceiverMode(bytecode);
Callable builtin = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
Node* function = m.IntPtrConstant(0);
Node* first_arg = m.IntPtrConstant(1);
Node* arg_count = m.Int32Constant(2);
Node* context = m.IntPtrConstant(3);
Node* call_js =
m.CallJS(function, context, first_arg, arg_count, receiver_mode);
EXPECT_THAT(call_js, IsCall(_, IsHeapConstant(builtin.code()), arg_count,
first_arg, function, context, _, _));
}
}
}
TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerTestState state(this, bytecode);
......