Commit 0f1b6112 authored by Aleksandar Rikalo, committed by Commit Bot

[mips] InterpreterEntryTrampoline improvement

Logic is added to InterpreterEntryTrampoline to detect flushed functions,
and enter CompileLazy instead. Get the bytecode array from the function
object and load it. The bytecode array could have been flushed from the
shared function info, if so, call into CompileLazy.

This fixes:

   cctest/test-heap/TestBytecodeFlushing
   cctest/test-heap/TestOptimizeAfterBytecodeFlushingCandidate
   debugger/debug/lazy-deopt-then-flush-bytecode

[mips] Macro-assembler fix

Fix massive failing of tests after fa3cbf60.

Change-Id: Ic1978b5233eefc743fd7b020f65153630ffa281f
Reviewed-on: https://chromium-review.googlesource.com/c/1388528
Reviewed-by: Sreten Kovacevic <skovacevic@wavecomp.com>
Commit-Queue: Sreten Kovacevic <skovacevic@wavecomp.com>
Cr-Commit-Position: refs/heads/master@{#58463}
parent d7493fb1
......@@ -1012,6 +1012,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ GetObjectType(kInterpreterBytecodeArrayRegister, a0, a0);
__ Branch(&compile_lazy, ne, a0, Operand(BYTECODE_ARRAY_TYPE));
// Load the feedback vector from the closure.
__ lw(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
......@@ -1026,12 +1039,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
// Increment invocation count for the function.
__ lw(t0, FieldMemOperand(feedback_vector,
......@@ -1040,18 +1047,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
t0, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
__ Assert(eq,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
t0, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
__ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
......@@ -1145,6 +1140,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
__ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
......
......@@ -1005,6 +1005,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ GetObjectType(kInterpreterBytecodeArrayRegister, a0, a0);
__ Branch(&compile_lazy, ne, a0, Operand(BYTECODE_ARRAY_TYPE));
// Load the feedback vector from the closure.
__ Ld(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
......@@ -1019,13 +1032,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
// Increment invocation count for the function.
__ Lw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
......@@ -1033,18 +1039,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sw(a4, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister, a4);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
a4, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
__ Assert(eq,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
a4, Operand(BYTECODE_ARRAY_TYPE));
}
// Reset code age.
DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
__ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
......@@ -1139,6 +1133,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in v0.
LeaveInterpreterFrame(masm, t0);
__ Jump(ra);
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
// Unreachable code.
__ break_(0xCC);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
......
......@@ -3980,9 +3980,8 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_pointer register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
Lsa(builtin_pointer, kRootRegister, builtin_pointer,
kSystemPointerSize - kSmiTagSize);
SmiUntag(builtin_pointer, builtin_pointer);
Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
lw(builtin_pointer,
MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
Call(builtin_pointer);
......
......@@ -4308,7 +4308,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
// The builtin_pointer register contains the builtin index as a Smi.
SmiUntag(builtin_pointer, builtin_pointer);
Lsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
Dlsa(builtin_pointer, kRootRegister, builtin_pointer, kSystemPointerSizeLog2);
Ld(builtin_pointer,
MemOperand(builtin_pointer, IsolateData::builtin_entry_table_offset()));
Call(builtin_pointer);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment