Commit 25bfba93 authored by zhengxing.li, committed by Commit bot

X87: Type Feedback Vector lives in the closure.

  port bb31db3a (r33741)

  original commit message:
  (RELAND: the problem before was a missing write barrier for adding the code
  entry to the new closure. It's been addressed with a new macro instruction
  and test. The only change to this CL is the addition of two calls to
  __ RecordWriteCodeEntryField() in the platform CompileLazy builtin.)

  We get less "pollution" of type feedback if we have one vector per native
  context, rather than one for the whole system. This CL moves the vector
  appropriately.

  We rely more heavily on the Optimized Code Map in the SharedFunctionInfo. The
  vector actually lives in the first slot of the literals array (indeed there is
  great commonality between those arrays; they can be thought of as the same
  thing). So we make a greater effort to ensure there is a valid literals array
  after compilation.
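
  A minimal sketch of that shared layout, assuming constant names close to
  the V8 sources of this vintage (treat them as illustrative):

    // The literals array doubles as the home of the feedback vector:
    // slot 0 holds the TypeFeedbackVector, the literals proper start at 1.
    class LiteralsArray : public FixedArray {
     public:
      static const int kVectorIndex = 0;        // the TypeFeedbackVector
      static const int kFirstLiteralIndex = 1;  // boilerplate literals follow
      TypeFeedbackVector* feedback_vector() const;
      Object* literal(int literal_index) const;
    };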

  This meant, for performance reasons, that we needed to extend
  FastNewClosureStub to support creating closures with literals. And ultimately,
  it drove us to move the optimized code map lookup out of FastNewClosureStub
  and into the compile lazy builtin.
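
  The lookup order the new builtin implements can be outlined as follows
  (a sketch of the control flow in the diff below, not verbatim source):

    // CompileLazy, as generated below:
    //   1. Scan the optimized code map for an entry whose context matches;
    //      it must supply literals (else: runtime), which are installed in
    //      the closure; if it also supplies code, install and tail-call it.
    //   2. If the entry had literals but no code, try the map's context-free
    //      shared entry (kSharedCodeIndex) for code to install.
    //   3. Failing that, install the SharedFunctionInfo's own code, unless
    //      it is still a builtin (i.e. not yet compiled).
    //   4. Otherwise, call Runtime::kCompileLazy.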

  The heap change is trivial, so I TBR Hannes for it.
  Yang has already looked at the debugger changes and approved them, so he is TBR too.
  Benedikt reviewed it as well.

BUG=

Review URL: https://codereview.chromium.org/1672643002

Cr-Commit-Position: refs/heads/master@{#33759}
parent acab11e0
@@ -1136,11 +1136,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
   // flag, we need to use the runtime function so that the new function
   // we are creating here gets a chance to have its code optimized and
   // doesn't just get a copy of the existing unoptimized code.
-  if (!FLAG_always_opt &&
-      !FLAG_prepare_always_opt &&
-      !pretenure &&
-      scope()->is_function_scope() &&
-      info->num_literals() == 0) {
+  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+      scope()->is_function_scope()) {
     FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
     __ mov(ebx, Immediate(info));
     __ CallStub(&stub);
@@ -852,6 +852,146 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- edx : new target (preserved for callee)
  // -- edi : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime, gotta_call_runtime_no_stack;
  Label maybe_call_runtime;
  Label try_shared;
  Label loop_top, loop_bottom;

  Register closure = edi;
  Register new_target = edx;
  __ push(new_target);
  __ push(closure);

  Register map = eax;
  Register index = ebx;
  __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
  __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
  __ cmp(index, Immediate(Smi::FromInt(2)));
  __ j(less, &gotta_call_runtime);
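
  // Layout note: the optimized code map is a FixedArray with a
  // context-independent entry at SharedFunctionInfo::kSharedCodeIndex and
  // per-context entries of SharedFunctionInfo::kEntryLength slots each,
  // holding (behind weak cells) a context, a literals array and cached code,
  // plus an OSR ast id smi. The loop below walks those entries from the end
  // of the array down to index 1.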
  // Find literals.
  // edx : native context
  // ebx : length / index
  // eax : optimized code map
  // stack[0] : new target
  // stack[4] : closure
  Register native_context = edx;
  __ mov(native_context, NativeContextOperand());
  __ bind(&loop_top);
  Register temp = edi;

  // Does the native context match?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousContext()));
  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
  __ cmp(temp, native_context);
  __ j(not_equal, &loop_bottom);
  // OSR id set to none?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousOsrAstId()));
  const int bailout_id = BailoutId::None().ToInt();
  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
  __ j(not_equal, &loop_bottom);
  // Literals available?
  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
                            SharedFunctionInfo::OffsetToPreviousLiterals()));
  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
  __ JumpIfSmi(temp, &gotta_call_runtime);

  // Save the literals in the closure.
  __ mov(ecx, Operand(esp, 0));
  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
  __ push(index);
  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ pop(index);

  // Code available?
  Register entry = ecx;
  __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
                             SharedFunctionInfo::OffsetToPreviousCachedCode()));
  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &maybe_call_runtime);

  // Found literals and code. Get them into the closure and return.
  __ pop(closure);
  // Store code entry in the closure.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  Label install_optimized_code_and_tailcall;
  __ bind(&install_optimized_code_and_tailcall);
  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
  __ RecordWriteCodeEntryField(closure, entry, eax);
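
  // RecordWriteCodeEntryField is the new macro instruction this reland adds:
  // the code entry is a raw pointer past the Code object's header rather
  // than a tagged pointer, so it needs its own write-barrier variant (the
  // missing barrier here is what broke the original land of this CL).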
  // Link the closure into the optimized function list.
  // ecx : code entry
  // edx : native context
  // edi : closure
  __ mov(ebx,
         ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
  __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
  __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  const int function_list_offset =
      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
  __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
         closure);
  // Save closure before the write barrier.
  __ mov(ebx, closure);
  __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
                            kDontSaveFPRegs);
  __ mov(closure, ebx);
  __ pop(new_target);
  __ jmp(entry);

  __ bind(&loop_bottom);
  __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
  __ cmp(index, Immediate(Smi::FromInt(1)));
  __ j(greater, &loop_top);

  // We found neither literals nor code.
  __ jmp(&gotta_call_runtime);

  __ bind(&maybe_call_runtime);
  __ pop(closure);

  // Last possibility. Check the context free optimized code map entry.
  __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
                                      SharedFunctionInfo::kSharedCodeIndex));
  __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
  __ JumpIfSmi(entry, &try_shared);

  // Store code entry in the closure.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ jmp(&install_optimized_code_and_tailcall);

  __ bind(&try_shared);
  __ pop(new_target);
  // Is the full code valid?
  __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
  __ and_(ebx, Code::KindField::kMask);
  __ shr(ebx, Code::KindField::kShift);
  __ cmp(ebx, Immediate(Code::BUILTIN));
  __ j(equal, &gotta_call_runtime_no_stack);
  // Yes, install the full code.
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
  __ RecordWriteCodeEntryField(closure, entry, eax);
  __ jmp(entry);

  __ bind(&gotta_call_runtime);
  __ pop(closure);
  __ pop(new_target);
  __ bind(&gotta_call_runtime_no_stack);

  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
  GenerateTailCallToReturnedCode(masm);
}
@@ -966,8 +966,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
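
In C++ terms, the load path above changes as follows (illustrative accessors,
not the exact V8 API):

  // Old: one vector per function, hanging off the SharedFunctionInfo.
  TypeFeedbackVector* old_vector = function->shared()->feedback_vector();
  // New: per closure, through the literals array (the vector is slot 0).
  TypeFeedbackVector* new_vector = function->literals()->feedback_vector();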