Commit 19aa7a20 authored by bjaideep, committed by Commit bot

PPC/s390: [TypeFeedbackVector] Root literal arrays in function literals slots

Port 93df0940

Original Commit Message:

    Literal arrays and feedback vectors for a function can be garbage
    collected if we don't have a rooted closure for the function, which
    happens often. It's expensive to come back from this (recreating
    boilerplates and gathering feedback again), and the cost is
    disproportionate if the function was inlined into optimized code.

    To guard against losing these arrays when we need them, we'll now
    create literal arrays when creating the feedback vector for the outer
    closure, and root them strongly in that vector.

R=mvstanton@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:5456
LOG=N

Review-Url: https://codereview.chromium.org/2592043003
Cr-Commit-Position: refs/heads/master@{#41898}
parent 455c244c
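
For readers unfamiliar with the mechanism being ported, a minimal conceptual sketch in C++ follows. It is an illustration only, not V8 code: LiteralArray, Closure, FeedbackVector, and CreateFeedbackVector are invented stand-ins, with shared/weak pointers standing in for strong and weak GC references.

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Stand-in for a function literal's boilerplates and feedback.
    struct LiteralArray {};

    // Before: a literal array was reachable only through a live closure
    // (modeled as a weak reference), so it could be collected whenever no
    // closure for the function happened to be rooted.
    struct Closure {
      std::weak_ptr<LiteralArray> literals;
    };

    // After: the outer closure's feedback vector owns one strong slot per
    // inner function literal, rooting each literal array directly.
    struct FeedbackVector {
      std::vector<std::shared_ptr<LiteralArray>> literal_slots;
    };

    // Creating the outer feedback vector now also creates and roots the
    // literal arrays for all inner function literals.
    FeedbackVector CreateFeedbackVector(std::size_t num_function_literals) {
      FeedbackVector v;
      v.literal_slots.reserve(num_function_literals);
      for (std::size_t i = 0; i < num_function_literals; ++i) {
        v.literal_slots.push_back(std::make_shared<LiteralArray>());
      }
      return v;
    }

With literals rooted this way, the CompileLazy fast path no longer needs to recover previously saved literals from the optimized code map and store them into the closure, which is why that block is deleted from both builtins below.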
@@ -1369,7 +1369,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
   __ blt(&gotta_call_runtime);
-  // Find literals.
   // r10 : native context
   // r5 : length / index
   // r9 : optimized code map
@@ -1390,18 +1389,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ cmp(temp, native_context);
   __ bne(&loop_bottom);
-  // Literals available?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-  // Save the literals in the closure.
-  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
   // Code available?
   Register entry = r7;
@@ -1411,7 +1398,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1445,7 +1432,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
   __ bgt(&loop_top);
-  // We found neither literals nor code.
+  // We found no code.
   __ b(&gotta_call_runtime);
   __ bind(&try_shared);

@@ -1374,7 +1374,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
   __ blt(&gotta_call_runtime);
-  // Find literals.
   // r9 : native context
   // r4 : length / index
   // r8 : optimized code map
@@ -1395,18 +1394,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ CmpP(temp, native_context);
   __ bne(&loop_bottom, Label::kNear);
-  // Literals available?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-  // Save the literals in the closure.
-  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
   // Code available?
   Register entry = r6;
@@ -1416,7 +1403,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1450,7 +1437,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
   __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
   __ bgt(&loop_top);
-  // We found neither literals nor code.
+  // We found no code.
   __ b(&gotta_call_runtime);
   __ bind(&try_shared);

@@ -63,7 +63,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r5};
+  Register registers[] = {r4, r5, r6};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }

@@ -61,7 +61,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4};
+  Register registers[] = {r3, r4, r5};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
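
A note on the two descriptor hunks directly above (apparently PPC first, then s390, matching the register conventions of the builtins diffs): FastNewClosure now receives three values in registers rather than one. Judging by the commit message, these are presumably the shared function info plus the feedback vector and slot in which the closure's literals are rooted; that mapping is an inference, not something this diff states. Restating the PPC hunk with that assumption spelled out:

    // Annotated restatement of the new PPC register set; the per-register
    // meanings are assumptions inferred from the commit message, not
    // confirmed by this diff.
    void FastNewClosureDescriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      Register registers[] = {r4,   // assumed: SharedFunctionInfo
                              r5,   // assumed: feedback vector
                              r6};  // assumed: slot index
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }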