Commit c33b25bf authored by bjaideep, committed by Commit bot

Revert of PPC/s390: Reland: [TypeFeedbackVector] Store optimized code in the vector (patchset #1 id:1 of https://codereview.chromium.org/2861863003/ )

Reason for revert:
Original CL reverted.
Crashing on Canary
BUG=chromium:718891

Original issue's description:
> PPC/s390: Reland: [TypeFeedbackVector] Store optimized code in the vector
>
> Port 662aa425
>
> Original Commit Message:
>
>     Since the feedback vector is itself a native context structure, why
>     not store optimized code for a function in there rather than in
>     a map from native context to code? This allows us to get rid of
>     the optimized code map in the SharedFunctionInfo, saving a pointer,
>     and making lookup of any optimized code quicker.
>
>     Original patch by Michael Stanton <mvstanton@chromium.org>
>
> R=rmcilroy@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
> BUG=v8:6246
> LOG=N
>
> Review-Url: https://codereview.chromium.org/2861863003
> Cr-Commit-Position: refs/heads/master@{#45111}
> Committed: https://chromium.googlesource.com/v8/v8/+/d587812258c232f7b9a1f1a9a017ba3f9cea12ea

TBR=joransiu@ca.ibm.com,jyan@ca.ibm.com,michael_dawson@ca.ibm.com,rmcilroy@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=v8:6246

Review-Url: https://codereview.chromium.org/2870703003
Cr-Commit-Position: refs/heads/master@{#45195}
parent 6867bd6c
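
Before the per-architecture diffs, here is the difference the original description is talking about, as a minimal C++ sketch. The class and member names below are invented for illustration and are not V8's real data structures; only the shape of the two caching schemes is taken from the description above and the assembly that follows.

#include <unordered_map>

struct Code {};
struct NativeContext {};

// Scheme restored by this revert: each SharedFunctionInfo carries an
// "optimized code map" keyed by native context, so finding optimized code
// means searching that map for the current context (the assembly below walks
// it as a flat array in a loop).
struct SharedFunctionInfoSketch {
  std::unordered_map<const NativeContext*, Code*> optimized_code_map;
  Code* FindOptimizedCode(const NativeContext* context) const {
    auto it = optimized_code_map.find(context);
    return it != optimized_code_map.end() ? it->second : nullptr;
  }
};

// Scheme being backed out: the feedback vector already belongs to exactly one
// native context, so the optimized code can live in a fixed slot there and be
// reached with a single load, and SharedFunctionInfo drops the map (and a
// pointer) entirely.
struct FeedbackVectorSketch {
  Code* optimized_code = nullptr;  // one fixed slot, no per-context search
  Code* FindOptimizedCode() const { return optimized_code; }
};

In the restored PPC/s390 assembly below, the first scheme shows up as the loop_top/loop_bottom loop over the array loaded from SharedFunctionInfo::kOptimizedCodeMapOffset; in the reverted version it was a single LoadP from the FeedbackVector::kOptimizedCodeIndex slot.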
PPC (Generate_CompileLazy):

@@ -1342,8 +1342,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
   Label try_shared;
+  Label loop_top, loop_bottom;
   Register closure = r4;
+  Register map = r9;
   Register index = r5;
   // Do we have a valid feedback vector?
@@ -1351,29 +1353,58 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
   __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
-  // Is optimized code available in the feedback vector?
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&try_shared);
+  // r10 : native context
+  // r5 : length / index
+  // r9 : optimized code map
+  // r6 : new target
+  // r4 : closure
+  Register native_context = r10;
+  __ LoadP(native_context, NativeContextMemOperand());
+  __ bind(&loop_top);
+  Register temp = r11;
+  Register array_pointer = r8;
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ add(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ bne(&loop_bottom);
+  // Code available?
   Register entry = r7;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r8);
-  // Load native context into r9.
-  Register native_context = r9;
-  __ LoadP(native_context, NativeContextMemOperand());
   // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
   __ LoadP(
       r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
   __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
             r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r5,
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   const int function_list_offset =
@@ -1383,11 +1414,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
       ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
   // Save closure before the write barrier.
   __ mr(r8, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r5,
+  __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
                             kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ JumpToJSEntry(entry);
-  // We found no optimized code.
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+  // We found no code.
+  __ b(&gotta_call_runtime);
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
...
s390 (Generate_CompileLazy):

@@ -1345,8 +1345,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   // First lookup code, maybe we don't need to compile!
   Label gotta_call_runtime;
   Label try_shared;
+  Label loop_top, loop_bottom;
   Register closure = r3;
+  Register map = r8;
   Register index = r4;
   // Do we have a valid feedback vector?
@@ -1354,29 +1356,59 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
   __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
-  // Is optimized code available in the feedback vector?
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&try_shared);
+  // Find literals.
+  // r9 : native context
+  // r4 : length / index
+  // r8 : optimized code map
+  // r5 : new target
+  // r3 : closure
+  Register native_context = r9;
+  __ LoadP(native_context, NativeContextMemOperand());
+  __ bind(&loop_top);
+  Register temp = r1;
+  Register array_pointer = r7;
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ AddP(array_pointer, map, array_pointer);
+  __ LoadP(temp, FieldMemOperand(array_pointer,
+                                 SharedFunctionInfo::kOffsetToPreviousContext));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ CmpP(temp, native_context);
+  __ bne(&loop_bottom, Label::kNear);
+  // Code available?
   Register entry = r6;
-  __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
-                                             kPointerSize +
-                                         FeedbackVector::kHeaderSize));
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::kOffsetToPreviousCachedCode));
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);
-  // Load native context into r8.
-  Register native_context = r8;
-  __ LoadP(native_context, NativeContextMemOperand());
   // Link the closure into the optimized function list.
+  // r6 : code entry
+  // r9: native context
+  // r3 : closure
   __ LoadP(
       r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
   __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
             r0);
-  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, r4,
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   const int function_list_offset =
@@ -1386,18 +1418,26 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
       ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
   // Save closure before the write barrier.
   __ LoadRR(r7, closure);
-  __ RecordWriteContextSlot(native_context, function_list_offset, r7, r4,
+  __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
                             kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ JumpToJSEntry(entry);
-  // We found no optimized code.
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+  // We found no code.
+  __ b(&gotta_call_runtime);
   __ bind(&try_shared);
   __ LoadP(entry,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Is the shared function marked for tier up?
-  __ LoadlB(r7, FieldMemOperand(
-                    entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
-  __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+  __ LoadlB(temp, FieldMemOperand(
+                      entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+  __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
   __ bne(&gotta_call_runtime);
   // If SFI points to anything other than CompileLazy, install that.
...