Commit fc738f1e authored by bjaideep, committed by Commit bot

PPC/s390: Reland: [Interpreter] Transition JSFunctions to call optimized code when possible.

Port 73d21080

Original Commit Message:

    Now that the optimized code hangs off the feedback vector, it is possible
    to check whether a function has optimized code available every time it's
    called in the interpreter entry trampoline. If optimized code exists, the
    interpreter entry trampoline 'self-heals' the closure to point to the
    optimized code and links the closure into the optimized code list.
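
The sketch below is only an illustration of the control flow described above. It uses simplified stand-in types and hypothetical names (DispatchOnEntry, code_entry, instruction_start) rather than V8's real object model; the actual builtins in the diff below emit the equivalent checks as PPC/s390 machine code through the MacroAssembler.

// Illustrative sketch only -- not V8 source. All types and names here are
// simplified stand-ins for the real JSFunction/FeedbackVector/Code objects.
#include <cstdint>

struct Code {
  bool marked_for_deoptimization;  // models Code::kMarkedForDeoptimizationBit
  uintptr_t instruction_start;     // what the closure's code entry would point at
};

struct FeedbackVector {
  Code* optimized_code;  // nullptr models the cleared/Smi weak cell slot
};

struct JSFunction {
  FeedbackVector* feedback_vector;
  uintptr_t code_entry;
};

// Decide where a call arriving at the interpreter entry trampoline should go.
uintptr_t DispatchOnEntry(JSFunction* closure, uintptr_t interpreter_entry,
                          uintptr_t evict_and_compile_entry) {
  Code* code = closure->feedback_vector->optimized_code;
  if (code == nullptr) {
    // No optimized code yet: fall through and interpret the bytecode.
    return interpreter_entry;
  }
  if (code->marked_for_deoptimization) {
    // Stale code: bail out so the runtime can evict the optimized code slot
    // (Runtime::kEvictOptimizedCodeSlot in the builtins below).
    return evict_and_compile_entry;
  }
  // "Self-heal": make the closure call the optimized code directly next time.
  // The real builtin also links the closure into the native context's
  // OPTIMIZED_FUNCTIONS_LIST (see ReplaceClosureEntryWithOptimizedCode below).
  closure->code_entry = code->instruction_start;
  return code->instruction_start;
}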

R=rmcilroy@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:6246
LOG=N

Review-Url: https://codereview.chromium.org/2897483002
Cr-Commit-Position: refs/heads/master@{#45410}
parent 55849b16
@@ -994,6 +994,41 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

static void ReplaceClosureEntryWithOptimizedCode(
    MacroAssembler* masm, Register optimized_code_entry, Register closure,
    Register scratch1, Register scratch2, Register scratch3) {
  Register native_context = scratch1;

  // Store code entry in the closure.
  __ addi(optimized_code_entry, optimized_code_entry,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(optimized_code_entry,
            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);

  // Link the closure into the optimized function list.
  // r7 : code entry
  // r10: native context
  // r4 : closure
  __ LoadP(native_context, NativeContextMemOperand());
  __ LoadP(scratch2, ContextMemOperand(native_context,
                                       Context::OPTIMIZED_FUNCTIONS_LIST));
  __ StoreP(scratch2,
            FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
                      scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ StoreP(
      closure,
      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
  // Save closure before the write barrier.
  __ mr(scratch2, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure,
                            scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ mr(closure, scratch2);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
@@ -1034,6 +1069,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(r4);

  // First check if there is optimized code in the feedback vector which we
  // could call instead.
  Label switch_to_optimized_code;
  Register optimized_code_entry = r7;
  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
  __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
  __ LoadP(
      optimized_code_entry,
      FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
                              FeedbackVector::kHeaderSize));
  __ LoadP(optimized_code_entry,
           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);

  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -1154,6 +1204,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(r4, r7, r8);
  __ JumpToJSEntry(r7);

  // If there is optimized code on the type feedback vector, check if it is good
  // to run, and if so, self heal the closure and call the optimized code.
  __ bind(&switch_to_optimized_code);
  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
  Label gotta_call_runtime;

  // Check if the optimized code is marked for deopt.
  __ lbz(r8, FieldMemOperand(optimized_code_entry,
                             Code::kKindSpecificFlags1Offset));
  __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
  __ bne(&gotta_call_runtime, cr0);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
                                       r5);
  __ JumpToJSEntry(optimized_code_entry);

  // Optimized code is marked for deopt, bailout to the CompileLazy runtime
  // function which will clear the feedback vector's optimized code slot.
  __ bind(&gotta_call_runtime);
  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}

static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1421,31 +1494,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ bne(&gotta_call_runtime, cr0);

  // Code is good, get it into the closure and tail call.
  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(closure, entry, r8);

  // Load native context into r9.
  Register native_context = r9;
  __ LoadP(native_context, NativeContextMemOperand());

  // Link the closure into the optimized function list.
  __ LoadP(
      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
            r0);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r5,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ StoreP(
      closure,
      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
  // Save closure before the write barrier.
  __ mr(r8, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r5,
                            kLRHasNotBeenSaved, kDontSaveFPRegs);
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
  __ JumpToJSEntry(entry);

  // We found no optimized code.
@@ -988,6 +988,41 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

static void ReplaceClosureEntryWithOptimizedCode(
    MacroAssembler* masm, Register optimized_code_entry, Register closure,
    Register scratch1, Register scratch2, Register scratch3) {
  Register native_context = scratch1;

  // Store code entry in the closure.
  __ AddP(optimized_code_entry, optimized_code_entry,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(optimized_code_entry,
            FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);

  // Link the closure into the optimized function list.
  // r6 : code entry
  // r9 : native context
  // r3 : closure
  __ LoadP(native_context, NativeContextMemOperand());
  __ LoadP(scratch2, ContextMemOperand(native_context,
                                       Context::OPTIMIZED_FUNCTIONS_LIST));
  __ StoreP(scratch2,
            FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
                      scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ StoreP(
      closure,
      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
  // Save closure before the write barrier.
  __ LoadRR(scratch2, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure,
                            scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ LoadRR(closure, scratch2);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
@@ -1028,6 +1063,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(r3);

  // First check if there is optimized code in the feedback vector which we
  // could call instead.
  Label switch_to_optimized_code;
  Register optimized_code_entry = r6;
  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
  __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
  __ LoadP(
      optimized_code_entry,
      FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
                              FeedbackVector::kHeaderSize));
  __ LoadP(optimized_code_entry,
           FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
  __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);

  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -1147,6 +1197,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(r3, r6, r7);
  __ JumpToJSEntry(r6);

  // If there is optimized code on the type feedback vector, check if it is good
  // to run, and if so, self heal the closure and call the optimized code.
  __ bind(&switch_to_optimized_code);
  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
  Label gotta_call_runtime;

  // Check if the optimized code is marked for deopt.
  __ LoadlB(r7, FieldMemOperand(optimized_code_entry,
                                Code::kKindSpecificFlags1Offset));
  __ tmll(r7, Operand(Code::kMarkedForDeoptimizationBit));
  __ bne(&gotta_call_runtime);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
                                       r4);
  __ JumpToJSEntry(optimized_code_entry);

  // Optimized code is marked for deopt, bailout to the CompileLazy runtime
  // function which will clear the feedback vector's optimized code slot.
  __ bind(&gotta_call_runtime);
  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}

static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1413,31 +1486,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ bne(&gotta_call_runtime);

  // Code is good, get it into the closure and tail call.
  __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
  __ RecordWriteCodeEntryField(closure, entry, r7);

  // Load native context into r8.
  Register native_context = r8;
  __ LoadP(native_context, NativeContextMemOperand());

  // Link the closure into the optimized function list.
  __ LoadP(
      r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
            r0);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ StoreP(
      closure,
      ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
  // Save closure before the write barrier.
  __ LoadRR(r7, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4,
                            kLRHasNotBeenSaved, kDontSaveFPRegs);
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
  __ JumpToJSEntry(entry);

  // We found no optimized code.