Commit 30f08f39 authored by Ross McIlroy, committed by Commit Bot

[Interpreter] Remove InterpreterExitTrampoline.

Always return to the InterpreterEntryTrampoline rather than calling the
InterpreterExitTrampoline from the Return bytecode handler. This fixes a
regression which occurred when we upset the call/return stack by skipping the
return to the InterpreterEntryTrampoline from the Return bytecode handler.
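
In outline, the change makes the entry trampoline itself decide between
dispatching and returning. The following standalone C++ sketch (not V8 code;
the names, opcode encodings, and the one-byte advance are all illustrative)
models the new control flow: after each handler completes, the trampoline
peeks at the bytecode at the saved offset, tears down the frame on Return,
and otherwise advances the offset and dispatches again.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative encoding; only kWide == 0 mirrors the real layout.
enum Bytecode : uint8_t { kWide, kExtraWide, kLdaZero, kReturn };

int RunEntryTrampoline(const std::vector<uint8_t>& bytecode_array) {
  int offset = 0;
  int accumulator = -1;
  for (;;) {
    // The handler for the current bytecode runs here and returns to us.
    if (bytecode_array[offset] == kLdaZero) accumulator = 0;
    // New behaviour: check for the Return bytecode here instead of having
    // the handler tail call a separate exit trampoline.
    if (bytecode_array[offset] == kReturn) break;
    offset += 1;  // AdvanceBytecodeOffset, simplified to one-byte bytecodes.
  }
  return accumulator;  // LeaveInterpreterFrame: the result is the accumulator.
}

int main() {
  std::vector<uint8_t> code = {kLdaZero, kReturn};
  std::printf("%d\n", RunEntryTrampoline(code));  // prints 0
  return 0;
}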

BUG=chromium:759390,chromium:753705

Change-Id: Ib625654a4a5072ac6c8d8e9611d1b9c0bbced4ca
Reviewed-on: https://chromium-review.googlesource.com/649517
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47826}
parent 07660a1c
@@ -317,12 +317,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
@@ -339,12 +339,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
@@ -1111,10 +1111,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_offset, Register bytecode,
Register scratch1) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -1122,9 +1121,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
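
AdvanceBytecodeOffset's new signature has the caller load the current
bytecode and pass it in, which lets the entry trampoline reuse that load for
its Return check. A rough, runnable C++ model of the computation (not V8
code; names are illustrative, and the real size table covers every bytecode,
not four):

#include <cstdint>

constexpr int kBytecodeCount = 4;             // illustrative size
constexpr uint8_t kWide = 0, kExtraWide = 1;  // prefix bytecodes

// size_table holds [single | wide | extra-wide] blocks of kBytecodeCount
// entries; that layout is what the kIntSize * kBytecodeCount scaling in the
// assembly above selects between.
int AdvanceBytecodeOffset(const uint8_t* bytecode_array, int bytecode_offset,
                          uint8_t bytecode, const int* size_table) {
  int table_base = 0;
  if (bytecode == kWide) {
    bytecode = bytecode_array[++bytecode_offset];  // skip prefix, reload
    table_base = kBytecodeCount;                   // wide-scaled sizes
  } else if (bytecode == kExtraWide) {
    bytecode = bytecode_array[++bytecode_offset];
    table_base = 2 * kBytecodeCount;               // extra-wide-scaled sizes
  }
  return bytecode_offset + size_table[table_base + bytecode];
}

int main() {
  // Sizes for opcodes {0, 1, 2, 3} at each of the three scales.
  const int table[3 * kBytecodeCount] = {1, 1, 2, 1,   // single
                                         1, 1, 3, 1,   // wide
                                         1, 1, 5, 1};  // extra-wide
  const uint8_t code[] = {kWide, 2, 0};  // a Wide-prefixed opcode 2
  // Caller mirrors the new contract: it loads code[0] and passes it in.
  return AdvanceBytecodeOffset(code, 0, code[0], table);  // 1 + 3 = 4
}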
@@ -1283,6 +1279,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(r4);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1290,11 +1289,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
__ b(&do_return, eq);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r1, r2);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -1307,12 +1318,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1504,6 +1509,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r1, r2);
......
@@ -1124,10 +1124,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_offset, Register bytecode,
Register scratch1) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -1135,9 +1134,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -1297,6 +1293,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(ip0);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ Ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1304,11 +1303,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
__ B(&do_return, eq);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, x1, x2);
__ B(&do_dispatch);
__ bind(&do_return);
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -1321,12 +1332,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1523,6 +1528,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, x1, x2);
......
@@ -106,7 +106,6 @@ namespace internal {
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterExitTrampoline) \
ASM(InterpreterPushArgsThenCall) \
ASM(InterpreterPushUndefinedAndArgsThenCall) \
ASM(InterpreterPushArgsThenCallFunction) \
......
@@ -759,10 +759,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_offset, Register bytecode,
Register scratch1) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -770,9 +769,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Immediate(
ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -929,9 +925,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ call(ebx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are due to the interpreter tail calling
// a builtin and then a dispatch, so advance the bytecode offset here and
// dispatch to the next handler.
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
@@ -940,11 +935,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
__ j(equal, &do_return, Label::kNear);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in eax.
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -961,11 +968,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in eax.
LeaveInterpreterFrame(masm, ebx, ecx);
__ ret(0);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
@@ -1286,6 +1288,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Load the current bytecode
__ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, ebx, edx);
......
@@ -1087,10 +1087,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2, Register scratch3) {
Register bytecode_offset, Register bytecode,
Register scratch1, Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
@@ -1098,10 +1097,6 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -1111,8 +1106,8 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
// Load the next bytecode and update table to the wide scaled table.
__ Addu(bytecode_offset, bytecode_offset, Operand(1));
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
__ Addu(scratch2, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
@@ -1120,17 +1115,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ Addu(bytecode_offset, bytecode_offset, Operand(1));
__ Addu(scratch3, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch3));
__ Addu(scratch2, bytecode_array, bytecode_offset);
__ lbu(bytecode, MemOperand(scratch2));
__ Addu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
__ Lsa(scratch3, bytecode_size_table, bytecode, 2);
__ lw(scratch3, MemOperand(scratch3));
__ Addu(bytecode_offset, bytecode_offset, scratch3);
__ Lsa(scratch2, bytecode_size_table, bytecode, 2);
__ lw(scratch2, MemOperand(scratch2));
__ Addu(bytecode_offset, bytecode_offset, scratch2);
}
// Generate code for entering a JS function with the interpreter.
@@ -1266,6 +1261,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ lw(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1273,11 +1271,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
__ Branch(&do_return, eq, a1,
Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -1291,11 +1302,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
@@ -1490,6 +1496,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
......
@@ -1091,20 +1091,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2, Register scratch3) {
Register bytecode_offset, Register bytecode,
Register scratch1, Register scratch2) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ li(
bytecode_size_table,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Load the current bytecode.
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -1114,8 +1109,8 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
// Load the next bytecode and update table to the wide scaled table.
__ Daddu(bytecode_offset, bytecode_offset, Operand(1));
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
__ Daddu(scratch2, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
@@ -1123,17 +1118,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
__ Daddu(bytecode_offset, bytecode_offset, Operand(1));
__ Daddu(scratch3, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch3));
__ Daddu(scratch2, bytecode_array, bytecode_offset);
__ Lbu(bytecode, MemOperand(scratch2));
__ Daddu(bytecode_size_table, bytecode_size_table,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ jmp(&load_size);
// Load the size of the current bytecode.
__ bind(&load_size);
__ Dlsa(scratch3, bytecode_size_table, bytecode, 2);
__ Lw(scratch3, MemOperand(scratch3));
__ Daddu(bytecode_offset, bytecode_offset, scratch3);
__ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
__ Lw(scratch2, MemOperand(scratch2));
__ Daddu(bytecode_offset, bytecode_offset, scratch2);
}
// Generate code for entering a JS function with the interpreter.
@@ -1269,6 +1264,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1276,11 +1274,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
__ Branch(&do_return, eq, a1,
Operand(static_cast<int>(interpreter::Bytecode::kReturn)));
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -1294,12 +1305,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
@@ -1494,6 +1499,11 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a1, MemOperand(a1));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, a1, a2, a3);
......
@@ -839,19 +839,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register scratch1,
Register scratch2) {
Register bytecode_offset, Register bytecode,
Register scratch1) {
Register bytecode_size_table = scratch1;
Register bytecode = scratch2;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
bytecode));
__ Move(bytecode_size_table,
ExternalReference::bytecode_size_table_address(masm->isolate()));
// Load the current bytecode.
__ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
@@ -1011,9 +1007,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ call(rbx);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
// Any returns to the entry trampoline are due to the interpreter tail calling
// a builtin and then a dispatch, so advance the bytecode offset here and
// dispatch to the next handler.
// Any returns to the entry trampoline are either due to the return bytecode
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
__ movp(kInterpreterBytecodeArrayRegister,
@@ -1023,11 +1018,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Check if we should return.
Label do_return;
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::kReturn)));
__ j(equal, &do_return, Label::kNear);
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx);
__ jmp(&do_dispatch);
__ bind(&do_return);
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
@@ -1042,12 +1049,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(&bytecode_array_loaded);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// The return value is in rax.
LeaveInterpreterFrame(masm, rbx, rcx);
__ ret(0);
}
static void Generate_StackOverflowCheck(
MacroAssembler* masm, Register num_args, Register scratch,
Label* stack_overflow,
@@ -1266,6 +1267,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
// Load the current bytecode.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx);
......
@@ -361,12 +361,6 @@ Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
ConstructForwardVarargsDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterExitTrampoline(Isolate* isolate) {
return Callable(BUILTIN_CODE(isolate, InterpreterExitTrampoline),
InterpreterExitTrampolineDescriptor(isolate));
}
// static
Callable CodeFactory::InterpreterPushArgsThenCall(
Isolate* isolate, ConvertReceiverMode receiver_mode,
......
@@ -92,7 +92,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ConstructForwardVarargs(Isolate* isolate);
static Callable ConstructFunctionForwardVarargs(Isolate* isolate);
static Callable InterpreterExitTrampoline(Isolate* isolate);
static Callable InterpreterPushArgsThenCall(Isolate* isolate,
ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode);
......
@@ -313,12 +313,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
@@ -612,14 +612,6 @@ void ApiCallbackDescriptor::InitializePlatformIndependent(
machine_types);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator
MachineType machine_types[] = {MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void InterpreterDispatchDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
......
@@ -76,7 +76,6 @@ class PlatformInterfaceDescriptor;
V(MathPowInteger) \
V(GrowArrayElements) \
V(NewArgumentsElements) \
V(InterpreterExitTrampoline) \
V(InterpreterDispatch) \
V(InterpreterPushArgsThenCall) \
V(InterpreterPushArgsThenConstruct) \
@@ -840,14 +839,6 @@ class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
class V8_EXPORT_PRIVATE InterpreterExitTrampolineDescriptor
: public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kAccumulator)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
InterpreterExitTrampolineDescriptor, CallInterfaceDescriptor)
};
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
: public CallInterfaceDescriptor {
public:
......
@@ -57,8 +57,9 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
[this] { CallEpilogue(); });
// Save the bytecode offset immediately if bytecode will make a call along the
// critical path.
if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
// critical path, or it is a return bytecode.
if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
bytecode_ == Bytecode::kReturn) {
SaveBytecodeOffset();
}
}
......
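
The InterpreterAssembler change above matters because the trampoline's new
Return check reads the current bytecode back out of the interpreter frame. A
hypothetical sketch of the invariant, with made-up names (InterpreterFrame
and ShouldLeaveFrame are not V8 identifiers):

#include <cstdint>

constexpr uint8_t kReturn = 2;  // illustrative encoding

struct InterpreterFrame {
  const uint8_t* bytecode_array;
  int bytecode_offset;  // only valid if the handler saved it before returning
};

// What the entry trampoline effectively asks after a handler returns; it
// gives the right answer only because Return now calls SaveBytecodeOffset.
bool ShouldLeaveFrame(const InterpreterFrame& frame) {
  return frame.bytecode_array[frame.bytecode_offset] == kReturn;
}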
@@ -2943,10 +2943,8 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
// Return the value in the accumulator.
IGNITION_HANDLER(Return, InterpreterAssembler) {
UpdateInterruptBudgetOnReturn();
Callable exit_trampoline = CodeFactory::InterpreterExitTrampoline(isolate());
Node* context = GetContext();
Node* accumulator = GetAccumulator();
TailCallStub(exit_trampoline, context, accumulator);
Return(accumulator);
}
// ThrowReferenceErrorIfHole <variable_name>
......
@@ -308,12 +308,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
@@ -308,12 +308,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......
@@ -46,6 +46,12 @@ RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
if (bytecode == interpreter::Bytecode::kReturn) {
// If we are returning, reset the bytecode array on the interpreted stack
// frame to the non-debug variant so that the interpreter entry trampoline
// sees the return bytecode rather than the DebugBreak.
interpreted_frame->PatchBytecodeArray(bytecode_array);
}
return isolate->interpreter()->GetBytecodeHandler(
bytecode, interpreter::OperandScale::kSingle);
}
......
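
The runtime change above keeps debugging compatible with the new return
path: the debug copy of a bytecode array replaces instrumented bytecodes
with DebugBreak, so if the frame still pointed at the debug copy, the
trampoline's peek would see DebugBreak where Return should be. A standalone
sketch of the idea (not V8 code; encodings and OnDebugBreakOnBytecode are
illustrative, though PatchBytecodeArray mirrors the call in the diff):

#include <cstdint>

constexpr uint8_t kReturn = 2, kDebugBreak = 3;  // illustrative encodings

struct InterpretedFrame {
  const uint8_t* bytecode_array;
  int bytecode_offset;
  void PatchBytecodeArray(const uint8_t* a) { bytecode_array = a; }
};

void OnDebugBreakOnBytecode(InterpretedFrame* frame,
                            const uint8_t* original_array) {
  if (original_array[frame->bytecode_offset] == kReturn) {
    // Point the frame back at the non-debug array so the entry
    // trampoline's return check sees kReturn, not kDebugBreak.
    frame->PatchBytecodeArray(original_array);
  }
}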
@@ -313,12 +313,6 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterExitTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kInterpreterAccumulatorRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
......