Commit 753ad25e authored by mbrandy, committed by Commit bot

PPC: Type Feedback Vector lives in the closure

Port bb31db3a

Original commit message:
    (RELAND: the problem before was a missing write barrier for adding the code
    entry to the new closure. It's been addressed with a new macro instruction
    and test. The only change to this CL is the addition of two calls to
    __ RecordWriteCodeEntryField() in the platform CompileLazy builtin.)

    We get less "pollution" of type feedback if we have one vector per native
    context, rather than one for the whole system. This CL moves the vector
    appropriately.

    We rely more heavily on the Optimized Code Map in the SharedFunctionInfo. The
    vector actually lives in the first slot of the literals array (indeed, there is
    great commonality between those arrays; they can be thought of as the same
    thing). So we make a greater effort to ensure there is a valid literals array
    after compilation.

    This meant, for performance reasons, that we needed to extend
    FastNewClosureStub to support creating closures with literals. And ultimately,
    it drove us to move the optimized code map lookup out of FastNewClosureStub
    and into the compile lazy builtin.

    The heap change is trivial so I TBR Hannes for it...
    Also, Yang has had a look at the debugger changes already and approved 'em. So he is TBR style too.
    And Benedikt reviewed it as well.

R=mvstanton@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1671553002

Cr-Commit-Position: refs/heads/master@{#33753}
parent ee10b595
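The RELAND note above concerns a missed write barrier on the code-entry store into a new closure. Write barriers protect collectors that do not scan the whole heap on every GC (generational remembered sets, incremental marking). Below is a minimal, self-contained toy of the generational case only; the types are stand-ins, and this is not V8's RecordWriteCodeEntryField, which also serves incremental marking.

    #include <cstdio>
    #include <unordered_set>

    // Toy generational heap: a minor GC scans only roots plus a "remembered
    // set" of old-generation slots known to point into the young generation.
    // A store that skips the barrier leaves a slot the minor GC cannot see.
    struct Object {
      bool in_young_generation;
      Object* field = nullptr;
    };

    std::unordered_set<Object**> remembered_set;

    // Store with a write barrier: record any old->young slot.
    void WriteWithBarrier(Object* holder, Object** slot, Object* value) {
      *slot = value;
      if (!holder->in_young_generation && value && value->in_young_generation)
        remembered_set.insert(slot);
    }

    int main() {
      Object closure{false};  // an old-generation holder
      Object code{true};      // a freshly allocated young object
      // A raw "closure.field = &code;" here would be invisible to a minor GC.
      WriteWithBarrier(&closure, &closure.field, &code);
      std::printf("remembered slots: %zu\n", remembered_set.size());  // 1
    }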
@@ -1185,7 +1185,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
   // we are creating here gets a chance to have its code optimized and
   // doesn't just get a copy of the existing unoptimized code.
   if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
-      scope()->is_function_scope() && info->num_literals() == 0) {
+      scope()->is_function_scope()) {
     FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
     __ mov(r5, Operand(info));
     __ CallStub(&stub);
@@ -1235,6 +1235,145 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r6 : new target (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  // -----------------------------------
+  // First lookup code, maybe we don't need to compile!
+  Label gotta_call_runtime;
+  Label maybe_call_runtime;
+  Label try_shared;
+  Label loop_top, loop_bottom;
+
+  Register closure = r4;
+  Register map = r3;
+  Register index = r5;
+  __ LoadP(map,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(map,
+           FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+  __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+  __ blt(&gotta_call_runtime);
+
+  // Find literals.
+  // r4  : closure
+  // r6  : new target
+  // r3  : optimized code map
+  // r5  : length / index
+  // r10 : native context
+  Register native_context = r10;
+  __ LoadP(native_context, NativeContextMemOperand());
+
+  __ bind(&loop_top);
+  Register array_pointer = r8;
+  Register temp = r9;
+
+  // Does the native context match?
+  __ SmiToPtrArrayOffset(array_pointer, index);
+  __ add(array_pointer, map, array_pointer);
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::OffsetToPreviousContext()));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ cmp(temp, native_context);
+  __ bne(&loop_bottom);
+  // OSR id set to none?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::OffsetToPreviousOsrAstId()));
+  const int bailout_id = BailoutId::None().ToInt();
+  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+  __ bne(&loop_bottom);
+  // Literals available?
+  __ LoadP(temp,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::OffsetToPreviousLiterals()));
+  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(temp, &gotta_call_runtime);
+  // Save the literals in the closure.
+  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r11,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Code available?
+  Register entry = r7;
+  __ LoadP(entry,
+           FieldMemOperand(array_pointer,
+                           SharedFunctionInfo::OffsetToPreviousCachedCode()));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &maybe_call_runtime);
+
+  // Found literals and code. Get them into the closure and return.
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  Label install_optimized_code_and_tailcall;
+  __ bind(&install_optimized_code_and_tailcall);
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+
+  // Link the closure into the optimized function list.
+  // r7 : code entry
+  // r10: native context
+  // r4 : closure
+  __ LoadP(
+      r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+  __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+            r0);
+  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, r3,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  const int function_list_offset =
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+  __ StoreP(closure, ContextMemOperand(native_context,
+                                       Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Save closure before the write barrier.
+  __ mr(r8, closure);
+  __ RecordWriteContextSlot(native_context, function_list_offset, r8, r3,
+                            kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&loop_bottom);
+  __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+                   r0);
+  __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+  __ bgt(&loop_top);
+  // We found neither literals nor code.
+  __ b(&gotta_call_runtime);
+
+  __ bind(&maybe_call_runtime);
+  // Last possibility. Check the context free optimized code map entry.
+  __ LoadP(entry,
+           FieldMemOperand(map, FixedArray::kHeaderSize +
+                                    SharedFunctionInfo::kSharedCodeIndex));
+  __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+  __ JumpIfSmi(entry, &try_shared);
+
+  // Store code entry in the closure.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ b(&install_optimized_code_and_tailcall);
+
+  __ bind(&try_shared);
+  // Is the full code valid?
+  __ LoadP(entry,
+           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+  __ LoadP(r8, FieldMemOperand(entry, Code::kFlagsOffset));
+  __ DecodeField<Code::KindField>(r8);
+  __ cmpi(r8, Operand(Code::BUILTIN));
+  __ beq(&gotta_call_runtime);
+  // Yes, install the full code.
+  __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+  __ RecordWriteCodeEntryField(closure, entry, r8);
+  __ JumpToJSEntry(entry);
+
+  __ bind(&gotta_call_runtime);
   CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
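For readers who do not speak PPC assembly, the control flow of the lookup above can be sketched in ordinary C++. The structs below are stand-ins for V8's heap objects, weak cells are modeled as nullable pointers, and the labels in the comments refer to the labels in the hunk above; this is an illustration, not V8 source.

    #include <vector>

    struct Code {};
    struct Literals {};
    struct Context {};

    struct CodeMapEntry {  // one group in the SFI's optimized code map
      Context* context;    // weak: native context the entry was compiled for
      int osr_ast_id;      // BailoutId, modeled here as a plain int
      Literals* literals;  // weak: literals array (feedback vector in slot 0)
      Code* code;          // weak: cached optimized code
    };

    const int kNoneBailoutId = -1;  // stands in for BailoutId::None().ToInt()

    // Returns code to install, or nullptr for "gotta_call_runtime".
    Code* CompileLazyFastPath(std::vector<CodeMapEntry>& map,
                              Context* native_context, Code* shared_entry_code,
                              Literals** literals_out) {
      // loop_top .. loop_bottom: walk context-dependent entries back to front.
      for (int i = static_cast<int>(map.size()) - 1; i >= 0; i--) {
        CodeMapEntry& e = map[i];
        if (e.context != native_context) continue;     // wrong native context
        if (e.osr_ast_id != kNoneBailoutId) continue;  // OSR entry, skip it
        if (e.literals == nullptr) return nullptr;     // cleared: call runtime
        *literals_out = e.literals;  // stored into the closure (with barrier)
        if (e.code != nullptr) return e.code;  // install and tail-call
        // maybe_call_runtime: literals but no code; last chance is the
        // context-independent entry at kSharedCodeIndex (if that is cleared
        // too, try_shared tests the SFI's full code).
        return shared_entry_code;
      }
      return nullptr;  // neither literals nor code: gotta_call_runtime
    }

    int main() {
      Context ctx;
      Code cached;
      Literals lits;
      std::vector<CodeMapEntry> map = {{&ctx, kNoneBailoutId, &lits, &cached}};
      Literals* out = nullptr;
      Code* code = CompileLazyFastPath(map, &ctx, nullptr, &out);
      return (code == &cached && out == &lits) ? 0 : 1;
    }

The real builtin additionally links the closure into the native context's OPTIMIZED_FUNCTIONS_LIST before tail-calling the code, with write barriers on each of the stores shown above.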
@@ -825,9 +825,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  LoadP(vector,
-        FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
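The macro-assembler hunk above captures the whole change on the read side: the vector is now reached through the closure's literals array (slot 0) rather than through the SharedFunctionInfo. Roughly, with stand-in types mirroring the offsets named above (illustrative only):

    struct TypeFeedbackVector {};

    struct LiteralsArray {
      TypeFeedbackVector* feedback_vector;  // slot 0: kFeedbackVectorOffset
      // ... literal slots follow ...
    };

    struct SharedFunctionInfo {
      TypeFeedbackVector* feedback_vector;  // old home, shared across contexts
    };

    struct JSFunction {
      SharedFunctionInfo* shared;  // JSFunction::kSharedFunctionInfoOffset
      LiteralsArray* literals;     // JSFunction::kLiteralsOffset
    };

    // Before: every closure of a function shared one vector via the SFI.
    TypeFeedbackVector* LoadVectorBefore(JSFunction* fn) {
      return fn->shared->feedback_vector;
    }

    // After: the vector comes from the closure's literals array, so closures
    // in different native contexts no longer pollute each other's feedback.
    TypeFeedbackVector* LoadVectorAfter(JSFunction* fn) {
      return fn->literals->feedback_vector;
    }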