Commit 73d21080 authored by Ross McIlroy, committed by Commit Bot

Reland: [Interpreter] Transition JSFunctions to call optimized code when possible.

Now that the optimized code hangs off the feedback vector, it is possible
to check whether a function has optimized code available every time it's
called in the interpreter entry trampoline. If optimized code exists, the
interpreter entry trampoline 'self-heals' the closure to point to the
optimized code and links the closure into the optimized code list.
 
BUG=v8:6246

Change-Id: I53b095db2a75ae4824c8195faf8649d766c86118
Reviewed-on: https://chromium-review.googlesource.com/501967
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#45328}
parent fab691b8
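The dispatch decision described in the commit message can be summarised with a small model before reading the per-architecture assembly below. The following is a hypothetical, simplified C++ sketch; the struct names and the EnterFunction helper are illustrative stand-ins, not V8's actual classes or API (the real logic is hand-written assembly in the interpreter entry trampoline): if the feedback vector's weak slot holds optimized code that is not marked for deoptimization, the closure's code entry is rewritten to point at it ("self-healing"); if the code is marked for deoptimization, the trampoline bails out to Runtime::kEvictOptimizedCodeSlot; otherwise execution stays in the interpreter.

// Hypothetical, simplified model of the trampoline's dispatch decision.
// Assumption: these types are stand-ins for illustration only.
#include <cstdio>

struct Code {
  bool marked_for_deoptimization;
};

struct FeedbackVector {
  Code* optimized_code;  // weak slot; null when cleared or never filled
};

struct JSFunction {
  FeedbackVector* feedback_vector;
  Code* code_entry;  // what the next call will run
};

enum class Dispatch { kInterpreter, kOptimizedCode, kEvictOptimizedCodeSlot };

Dispatch EnterFunction(JSFunction* closure) {
  Code* optimized = closure->feedback_vector->optimized_code;
  if (optimized == nullptr) return Dispatch::kInterpreter;
  if (optimized->marked_for_deoptimization) {
    // Bail out to the runtime, which evicts the stale optimized code slot.
    return Dispatch::kEvictOptimizedCodeSlot;
  }
  closure->code_entry = optimized;  // "self-heal" the closure
  return Dispatch::kOptimizedCode;
}

int main() {
  Code opt{false};
  FeedbackVector vector{&opt};
  JSFunction closure{&vector, nullptr};
  std::printf("dispatch = %d\n", static_cast<int>(EnterFunction(&closure)));
  return 0;
}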
...@@ -963,6 +963,38 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store code entry in the closure.
__ add(optimized_code_entry, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(optimized_code_entry,
FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ ldr(native_context, NativeContextMemOperand());
__ ldr(scratch2,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(scratch2,
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(scratch2, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
...@@ -988,7 +1020,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// o r1: the JS function object being called.
// o r3: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
...@@ -1004,6 +1035,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(r1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = r4;
__ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
__ ldr(
optimized_code_entry,
FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
...@@ -1116,6 +1161,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ RecordWriteCodeEntryField(r1, r4, r5);
  __ Jump(r4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ ldr(r5, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ b(ne, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
r2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
...@@ -1379,30 +1448,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ b(ne, &gotta_call_runtime);
  // Code is good, get it into the closure and tail call.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
// Load native context into r6.
Register native_context = r6;
__ ldr(native_context, NativeContextMemOperand());
// Link the closure into the optimized function list.
__ ldr(r5,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r2,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(r5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, r2,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, r5);
  __ Jump(entry);
  // We found no optimized code.
......
...@@ -987,6 +987,36 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store code entry in the closure.
__ Add(optimized_code_entry, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(optimized_code_entry,
FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ Ldr(native_context, NativeContextMemOperand());
__ Ldr(scratch2,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Str(scratch2,
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ Str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Mov(scratch2, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, scratch2,
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
...@@ -1028,6 +1058,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Push(lr, fp, cp, x1);
  __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = x7;
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
__ Ldr(
optimized_code_entry,
FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ldr(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
...@@ -1143,6 +1187,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
  __ RecordWriteCodeEntryField(x1, x7, x5);
  __ Jump(x7);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Ldr(w8, FieldMemOperand(optimized_code_entry,
Code::kKindSpecificFlags1Offset));
__ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
&gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
x13);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
...@@ -1412,28 +1479,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
                           &gotta_call_runtime);
  // Code is good, get it into the closure and tail call.
  __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
// Load native context into x4.
Register native_context = x4;
__ Ldr(native_context, NativeContextMemOperand());
// Link the closure into the optimized function list.
__ Ldr(x8,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ Str(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Mov(x5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Jump(entry);
  // We found no optimized code.
......
...@@ -617,6 +617,37 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ jmp(&stepping_prepared);
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store the optimized code in the closure.
__ lea(optimized_code_entry,
FieldOperand(optimized_code_entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset),
optimized_code_entry);
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ mov(native_context, NativeContextOperand());
__ mov(scratch3,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ mov(scratch3, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
scratch2, kDontSaveFPRegs);
__ mov(closure, scratch3);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  Register args_count = scratch1;
...@@ -664,6 +695,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ push(edi);  // Callee's JS function.
  __ push(edx);  // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = ecx;
__ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
__ mov(optimized_code_entry,
FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ mov(optimized_code_entry,
FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
...@@ -780,6 +824,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
  __ RecordWriteCodeEntryField(edi, ecx, ebx);
  __ jmp(ecx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
__ push(edx);
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
eax, ebx);
__ pop(edx);
__ leave();
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
__ leave();
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
...@@ -1184,31 +1253,9 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ j(not_zero, &gotta_call_runtime);
  // Code is good, get it into the closure and tail call.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
  __ push(argument_count);
  __ push(new_target);
  __ RecordWriteCodeEntryField(closure, entry, eax);
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
// Load native context into edx.
Register native_context = edx;
__ mov(native_context, NativeContextOperand());
// Link the closure into the optimized function list.
__ mov(ebx,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ mov(ebx, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
kDontSaveFPRegs);
__ mov(closure, ebx);
  __ pop(new_target);
  __ pop(argument_count);
  __ jmp(entry);
......
...@@ -956,6 +956,38 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store code entry in the closure.
__ Addu(optimized_code_entry, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(optimized_code_entry,
FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ lw(native_context, NativeContextMemOperand());
__ lw(scratch2,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ sw(scratch2,
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ sw(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(scratch2, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
...@@ -996,6 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = t0;
__ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ lw(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ lw(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
...@@ -1114,6 +1159,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ RecordWriteCodeEntryField(a1, t0, t1);
  __ Jump(t0);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ lw(t1,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
t2);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
...@@ -1380,30 +1448,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
  // Code is good, get it into the closure and tail call.
  __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
// Load native context into t3.
Register native_context = t3;
__ lw(native_context, NativeContextMemOperand());
// Link the closure into the optimized function list.
__ lw(t1,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, t2,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ sw(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(t1, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, t2,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, t1);
  __ Jump(entry);
  // We found no optimized code.
......
...@@ -957,6 +957,38 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store code entry in the closure.
__ Daddu(optimized_code_entry, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Sd(optimized_code_entry,
FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ Ld(native_context, NativeContextMemOperand());
__ Ld(scratch2,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Sd(scratch2,
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ Sd(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(scratch2, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, scratch2);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;
...@@ -996,6 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(a1);
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = a4;
__ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
__ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
__ Ld(optimized_code_entry,
FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Ld(optimized_code_entry,
FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
...@@ -1114,6 +1159,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ RecordWriteCodeEntryField(a1, a4, a5);
  __ Jump(a4);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ Lw(a5,
FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
__ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
t0);
__ Jump(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
...@@ -1381,30 +1449,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
  // Code is good, get it into the closure and tail call.
  __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
__ Sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
// Load native context into t3.
Register native_context = t3;
__ Ld(native_context, NativeContextMemOperand());
// Link the closure into the optimized function list.
__ Ld(a5,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ Sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, t0,
kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ Sd(closure,
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
// Save closure before the write barrier.
__ mov(a5, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, t0,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(closure, a5);
  __ Jump(entry);
  // We found no optimized code.
......
...@@ -696,6 +696,37 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  __ jmp(&stepping_prepared);
}
static void ReplaceClosureEntryWithOptimizedCode(
MacroAssembler* masm, Register optimized_code_entry, Register closure,
Register scratch1, Register scratch2, Register scratch3) {
Register native_context = scratch1;
// Store the optimized code in the closure.
__ leap(optimized_code_entry,
FieldOperand(optimized_code_entry, Code::kHeaderSize));
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset),
optimized_code_entry);
__ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
// Link the closure into the optimized function list.
__ movp(native_context, NativeContextOperand());
__ movp(scratch3,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ movp(scratch3, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
scratch2, kDontSaveFPRegs);
__ movp(closure, scratch3);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  Register args_count = scratch1;
...@@ -743,6 +774,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Push(rdi);  // Callee's JS function.
  __ Push(rdx);  // Callee's new target.
// First check if there is optimized code in the feedback vector which we
// could call instead.
Label switch_to_optimized_code;
Register optimized_code_entry = rcx;
__ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ movp(rbx,
FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
__ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
...@@ -857,6 +900,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
  __ RecordWriteCodeEntryField(rdi, rcx, r15);
  __ jmp(rcx);
// If there is optimized code on the type feedback vector, check if it is good
// to run, and if so, self heal the closure and call the optimized code.
__ bind(&switch_to_optimized_code);
__ leave();
Label gotta_call_runtime;
// Check if the optimized code is marked for deopt.
__ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
__ j(not_zero, &gotta_call_runtime);
// Optimized code is good, get it into the closure and link the closure into
// the optimized functions list, then tail call the optimized code.
ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
r15, rbx);
__ jmp(optimized_code_entry);
// Optimized code is marked for deopt, bailout to the CompileLazy runtime
// function which will clear the feedback vector's optimized code slot.
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(
...@@ -1160,29 +1225,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  __ j(not_zero, &gotta_call_runtime);
  // Code is good, get it into the closure and tail call.
  __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
  ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
// Load native context into r14.
Register native_context = r14;
__ movp(native_context, NativeContextOperand());
// Link the closure into the optimized function list.
__ movp(rbx,
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
__ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
const int function_list_offset =
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
__ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
closure);
// Save closure before the write barrier.
__ movp(rbx, closure);
__ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
kDontSaveFPRegs);
__ movp(closure, rbx);
  __ jmp(entry);
  // We found no optimized code.
......
...@@ -68,6 +68,17 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
  return function->code();
}
RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
SealHandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DCHECK(function->is_compiled());
function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "Runtime_EvictOptimizedCodeSlot");
return function->code();
}
RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
  HandleScope scope(isolate);
  DCHECK_EQ(args.length(), 4);
......
...@@ -130,6 +130,7 @@ namespace internal {
  F(CompileLazy, 1, 1) \
  F(CompileOptimized_Concurrent, 1, 1) \
  F(CompileOptimized_NotConcurrent, 1, 1) \
  F(EvictOptimizedCodeSlot, 1, 1) \
  F(NotifyStubFailure, 0, 1) \
  F(NotifyDeoptimized, 1, 1) \
  F(CompileForOnStackReplacement, 1, 1) \
......
...@@ -8,51 +8,61 @@ function Data() {
}
Data.prototype = { x: 1 };

function TriggerDeopt() {
  Data.prototype = { x: 2 };
}

function TestDontSelfHealWithDeoptedCode(run_unoptimized, ClosureFactory) {
  // Create some function closures which don't have
  // optimized code.
  var unoptimized_closure = ClosureFactory();
  if (run_unoptimized) {
    unoptimized_closure();
  }

  // Run and optimize the code (do this in a separate function
  // so that the closure doesn't leak in a dead register).
  (() => {
    var optimized_closure = ClosureFactory();
    // Use .call to avoid the CallIC retaining the JSFunction in the
    // feedback vector via a weak map, which would mean it wouldn't be
    // collected in the minor gc below.
    optimized_closure.call(undefined);
    %OptimizeFunctionOnNextCall(optimized_closure);
    optimized_closure.call(undefined);
  })();

  // Optimize a dummy function, just so it gets linked into the
  // Context's optimized_functions list head, which is in the old
  // space, and the link to the optimized_closure's JSFunction
  // moves to the inline link in dummy's JSFunction in the new space,
  // otherwise optimized_closure's JSFunction will be retained by the
  // old->new remembered set.
  (() => {
    var dummy = function() { return 1; };
    %OptimizeFunctionOnNextCall(dummy);
    dummy();
  })();

  // GC the optimized closure with a minor GC - the optimized
  // code will remain in the feedback vector.
  gc(true);

  // Trigger deoptimization by changing the prototype of Data. This
  // will mark the code for deopt, but since no live JSFunction has
  // optimized code, we won't clear the feedback vector.
  TriggerDeopt();

  // Call the pre-existing function; it will try to self-heal with the
  // optimized code in the feedback vector slot, but should bail out
  // since the code is marked for deoptimization.
  unoptimized_closure();
}

// Run with the unoptimized closure both uncompiled and compiled for the
// interpreter initially, to test self-healing on both CompileLazy and
// the InterpreterEntryTrampoline respectively.
TestDontSelfHealWithDeoptedCode(false,
    () => { return () => { return new Data() }});
TestDontSelfHealWithDeoptedCode(true,
    () => { return () => { return new Data() }});