Commit 3136b4f5 authored by Junliang Yan, committed by Commit Bot

PPC/s390: [SFI] Add support for flushing old Bytecode from SharedFunctionInfos.

Port a55803a1

Original Commit Message:

    This change makes the SFI to bytecode link pseudo-weak. The marking visitors
    check whether the bytecode is old, and if so, don't mark it and instead
    push the SFI onto a bytecode_flushing_candidates worklist. Once marking
    is complete, this list is walked, and for any of the candidates whose bytecode
    has not been marked (i.e., is only referenced by the shared function info),
    the bytecode is flushed and the SFI has the function data replaced with
    an UncompiledData (which overwrites the flushed bytecode array).
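
A toy model of this mechanism (an editorial sketch, not part of the original commit and not real V8 code; every type and name below is invented for illustration): the marking visitor skips old bytecode and records the SFI as a candidate, and a post-marking pass flushes any candidate whose bytecode nothing else kept alive.

    // Toy model of the pseudo-weak SFI -> bytecode link (illustration only).
    #include <memory>
    #include <unordered_set>
    #include <vector>

    struct BytecodeArray { bool old = false; };

    struct SharedFunctionInfo {
      // function_data holds bytecode, or "uncompiled data" after flushing
      // (modeled here as a null pointer).
      std::shared_ptr<BytecodeArray> function_data;
      bool HasBytecode() const { return function_data != nullptr; }
    };

    struct Marker {
      std::unordered_set<const BytecodeArray*> marked;
      std::vector<SharedFunctionInfo*> flushing_candidates;

      void VisitSharedFunctionInfo(SharedFunctionInfo* sfi) {
        if (!sfi->HasBytecode()) return;
        if (sfi->function_data->old) {
          // Treat the edge as weak: don't mark the bytecode, record the SFI.
          flushing_candidates.push_back(sfi);
        } else {
          marked.insert(sfi->function_data.get());
        }
      }

      // Runs after marking is complete: flush bytecode that only the SFI
      // still references.
      void ProcessFlushingCandidates() {
        for (SharedFunctionInfo* sfi : flushing_candidates) {
          if (marked.count(sfi->function_data.get()) == 0) {
            sfi->function_data.reset();  // stands in for installing UncompiledData
          }
        }
        flushing_candidates.clear();
      }
    };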

    Since we don't track JSFunctions, these can still think the underlying
    function is compiled, and so calling them will invoke
    InterpreterEntryTrampoline. As such, logic is added to
    InterpreterEntryTrampoline to detect flushed functions, and enter
    CompileLazy instead.
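
A minimal sketch of the check that the PPC and s390 hunks below emit (simplified C++ for illustration; the trampoline itself emits the equivalent CompareObjectType/bne sequence shown in the diff): if the SFI's function data is no longer a BytecodeArray, the bytecode was flushed and the call falls back to CompileLazy.

    // Illustration of the flushed-bytecode check; types are simplified stand-ins.
    #include <variant>

    struct BytecodeArray {};
    struct UncompiledData {};  // what flushing leaves in the function data slot

    using FunctionData = std::variant<BytecodeArray, UncompiledData>;

    enum class Entry { kInterpret, kCompileLazy };

    // Mirrors the CompareObjectType(..., BYTECODE_ARRAY_TYPE) + bne pair:
    // only genuine bytecode proceeds into the interpreter frame setup.
    Entry ChooseEntry(const FunctionData& data) {
      if (std::holds_alternative<BytecodeArray>(data)) return Entry::kInterpret;
      return Entry::kCompileLazy;  // GenerateTailCallToReturnedCode(Runtime::kCompileLazy)
    }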

R=rmcilroy@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:8395
LOG=N

Change-Id: I4a913d318363a584d79aa6d7f4ee09f04a89bbbe
Reviewed-on: https://chromium-review.googlesource.com/c/1393824
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#58508}
parent f225a474
@@ -1036,6 +1036,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = r4;
Register feedback_vector = r5;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1045,20 +1060,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
// Increment invocation count for the function.
__ LoadWord(
r8,
@@ -1070,18 +1071,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Reset code age.
__ mov(r8, Operand(BytecodeArray::kNoAgeBytecodeAge));
@@ -1179,6 +1173,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r3.
LeaveInterpreterFrame(masm, r5);
__ blr();
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1077,6 +1077,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = r3;
Register feedback_vector = r4;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
Label compile_lazy;
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
BYTECODE_ARRAY_TYPE);
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@@ -1086,20 +1101,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
// Increment invocation count for the function.
__ LoadW(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
@@ -1107,16 +1108,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreW(r1, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
__ Assert(
ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(
eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Reset code age.
__ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
@@ -1216,6 +1212,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r2.
LeaveInterpreterFrame(masm, r4);
__ Ret();
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
__ bkpt(0); // Should not return.
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,