Commit 4237fc37 authored by Mythri Alle, committed by Commit Bot

Preparation for feedback-free V8: Use feedback vector only when valid

This CL:
1. Adds a new feedback cell map to specify that no feedback is
collected
2. Checks that feedback vectors are valid before using them when
creating closures
3. Updates the runtime profiler to tier up only functions that have
feedback vectors
4. Updates the interpreter entry trampoline to check for a feedback
vector before using it (see the sketch below)
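
The common thread in all four changes is a guard: any consumer of the
feedback vector must first check that one was actually allocated. A
minimal standalone sketch of that guard, with stand-in types rather
than the real V8 classes:

#include <cassert>

// Stand-ins for v8::internal::JSFunction / FeedbackVector.
struct FeedbackVector {
  int invocation_count = 0;
};

struct JSFunction {
  FeedbackVector* feedback_vector = nullptr;  // null when feedback is off
  bool has_feedback_vector() const { return feedback_vector != nullptr; }
};

// Mirrors the trampoline and profiler changes: touch the vector only when
// it exists; the function runs either way.
void OnFunctionEntry(JSFunction* fn) {
  if (fn->has_feedback_vector()) {
    fn->feedback_vector->invocation_count++;
  }
  // ...push the stack frame and dispatch to bytecode regardless...
}

int main() {
  JSFunction bare;               // feedback-free function
  OnFunctionEntry(&bare);        // must not crash
  FeedbackVector vector;
  JSFunction profiled{&vector};
  OnFunctionEntry(&profiled);
  assert(vector.invocation_count == 1);
}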

Bug: v8:8394
Change-Id: I0248c8cd35d841c2744b22f4c672fa2e82033f6e
Reviewed-on: https://chromium-review.googlesource.com/c/1339866
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57648}
parent 0d2dcb0c
@@ -690,6 +690,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)));
__ b(eq, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -833,13 +836,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
__ b(eq, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ add(r9, r9, Operand(1));
__ str(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
@@ -850,13 +868,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ add(r9, r9, Operand(1));
__ str(r9, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ SmiTst(kInterpreterBytecodeArrayRegister);
......
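
In outline, the reworked trampoline (ARM above; the arm64, ia32 and x64
versions below mirror it) hoists the tiering check and the invocation
count bump in front of the frame push and gates both on vector validity.
A control-flow model with descriptive stand-in names, not V8 helpers:

// Model of the reworked InterpreterEntryTrampoline.
struct EntryState { bool has_feedback_vector; };

void MaybeTailCallOptimizedCodeSlot(EntryState&) { /* tiering checks */ }
void IncrementInvocationCount(EntryState&) {}
void PushStandardFrame() {}
void DispatchToBytecode() {}

void InterpreterEntryTrampoline(EntryState state) {
  if (state.has_feedback_vector) {
    // Both steps require an allocated vector, so they now run behind the
    // validity check; previously the invocation count was bumped after
    // the frame was already set up.
    MaybeTailCallOptimizedCodeSlot(state);
    IncrementInvocationCount(state);
  }
  // push_stack_frame:
  PushStandardFrame();
  DispatchToBytecode();
}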
@@ -786,6 +786,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
&fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -927,13 +930,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
__ B(eq, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so it is safe
// to reuse it here.
__ Ldr(w10, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
__ Str(w10, FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ Bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -952,13 +971,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
InterpreterData::kBytecodeArrayOffset));
__ Bind(&has_bytecode_array);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
__ Add(w10, w10, Operand(1));
__ Str(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(
......
@@ -71,6 +71,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* const feedback_cell_map = LoadMap(feedback_cell);
Label no_closures(this), one_closure(this), cell_done(this);
GotoIf(IsNoFeedbackCellMap(feedback_cell_map), &cell_done);
GotoIf(IsNoClosuresCellMap(feedback_cell_map), &no_closures);
GotoIf(IsOneClosureCellMap(feedback_cell_map), &one_closure);
CSA_ASSERT(this, IsManyClosuresCellMap(feedback_cell_map),
......
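
For context: the feedback cell's map doubles as a closure counter (no
closures / one closure / many closures), and the new NoFeedbackCell map
adds a fourth, sticky state that FastNewClosure now checks first. A
standalone model of the states and of the transition applied when a
closure is created (hypothetical names; the real transitions happen in
Factory::NewFunctionFromSharedFunctionInfo below):

// kNoFeedback never transitions: no feedback will ever be collected, so
// counting closures for the cell is pointless.
enum class CellMap { kNoFeedback, kNoClosures, kOneClosure, kManyClosures };

CellMap OnClosureCreated(CellMap map) {
  switch (map) {
    case CellMap::kNoClosures:   return CellMap::kOneClosure;
    case CellMap::kOneClosure:   return CellMap::kManyClosures;
    case CellMap::kNoFeedback:   // deliberately sticky
    case CellMap::kManyClosures: return map;
  }
  return map;
}

This matches the relaxed DCHECK in factory.cc below, which now accepts
either the many-closures map or the no-feedback map on that path.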
@@ -647,17 +647,15 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// -- eax : argument count (preserved for callee if needed, and caller)
// -- edx : new target (preserved for callee if needed, and caller)
// -- edi : target function (preserved for callee if needed, and caller)
// -- ecx : feedback vector (also used as scratch, value is not preserved)
// -----------------------------------
DCHECK(!AreAliased(eax, edx, edi, scratch));
Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = edi;
// Load the feedback vector from the closure.
// Scratch contains feedback_vector.
Register feedback_vector = scratch;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
@@ -677,6 +675,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Immediate(Smi::FromEnum(OptimizationMarker::kNone)));
__ j(equal, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -823,12 +824,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = edi;
Register feedback_vector = ecx;
Label push_stack_frame;
// Load feedback vector and check if it is valid. If valid, check for
// optimized code and update invocation count. Otherwise, set up the stack
// frame.
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, RootIndex::kUndefinedValue, &push_stack_frame);
// Read off the optimized code slot in the closure's feedback vector, and if
// there is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, ecx);
// Load the feedback vector and increment the invocation count.
Register feedback_vector = ecx;
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
@@ -837,6 +847,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set
// up the frame (that is done below).
__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
......
@@ -723,6 +723,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Smi::FromEnum(OptimizationMarker::kNone));
__ j(equal, &fallthrough);
// TODO(v8:8394): The logging of first execution will break if
// feedback vectors are not allocated. We need to find a different way of
// logging these events if required.
TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
OptimizationMarker::kLogFirstExecution,
Runtime::kFunctionFirstExecution);
@@ -863,13 +866,24 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, set up the stack frame.
__ JumpIfRoot(feedback_vector, RootIndex::kUndefinedValue, &push_stack_frame);
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
// Increment invocation count for the function.
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
__ bind(&push_stack_frame);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
@@ -884,10 +898,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
// Increment invocation count for the function.
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
// Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
......
@@ -71,6 +71,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
V(NanValue, nan_value, Nan) \
V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
V(NoFeedbackCellMap, no_feedback_cell_map, NoFeedbackCellMap) \
V(NullValue, null_value, Null) \
V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
V(PreParsedScopeDataMap, pre_parsed_scope_data_map, PreParsedScopeDataMap) \
......
@@ -2019,7 +2019,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
if (shared->is_compiled() && !shared->HasAsmWasmData()) {
JSFunction::EnsureFeedbackVector(function);
Code code = function->feedback_vector()->optimized_code();
Code code = function->has_feedback_vector()
? function->feedback_vector()->optimized_code()
: Code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
......
@@ -1802,6 +1802,17 @@ Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
return cell;
}
Handle<FeedbackCell> Factory::NewNoFeedbackCell() {
AllowDeferredHandleDereference convert_to_cell;
HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
*no_feedback_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
// Set the value to undefined. We never allocate feedback vectors for cells
// with the NoFeedbackCell map.
cell->set_value(*undefined_value());
return cell;
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
PretenureFlag pretenure) {
DCHECK(name->IsUniqueName());
@@ -2501,7 +2512,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
} else if (feedback_cell->map() == *one_closure_cell_map()) {
feedback_cell->set_map(*many_closures_cell_map());
} else {
DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
DCHECK(feedback_cell->map() == *no_feedback_cell_map() ||
feedback_cell->map() == *many_closures_cell_map());
}
// Check that the optimized code in the feedback cell wasn't marked for
......
@@ -481,6 +481,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewNoFeedbackCell();
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
......
@@ -444,6 +444,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_feedback_cell)
roots.no_feedback_cell_map()->mark_unstable();
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
@@ -769,6 +771,10 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
// Allocate FeedbackCell for cases where we don't collect feedback.
Handle<FeedbackCell> no_feedback_cell = factory->NewNoFeedbackCell();
set_no_feedback_cell(*no_feedback_cell);
set_default_microtask_queue(*factory->NewMicrotaskQueue());
{
......
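
Because the cell's value is permanently undefined and carries no
per-function state, one canonical cell is enough, and every
feedback-free closure shares it (fetched via
isolate->factory()->no_feedback_cell(), as in runtime-scopes.cc below).
The same singleton idea in isolation, as a hedged sketch:

#include <cassert>

// One cell, created once at heap setup, shared by all feedback-free
// closures; its value never leaves "undefined".
struct FeedbackCell {
  const void* value = nullptr;  // stands in for undefined_value()
};

const FeedbackCell* NoFeedbackCell() {
  static const FeedbackCell cell;  // allocated once, like the heap root
  return &cell;
}

int main() {
  // Two feedback-free closures end up with the same cell identity.
  assert(NoFeedbackCell() == NoFeedbackCell());
}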
@@ -2559,18 +2559,30 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
Node* flags = BytecodeOperandFlag(2);
Node* context = GetContext();
Node* slot = BytecodeOperandIdx(1);
Node* feedback_vector = LoadFeedbackVector();
TNode<Object> feedback_cell =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
Label if_undefined(this), load_feedback_done(this);
Variable feedback_cell(this, MachineRepresentation::kTagged);
Node* feedback_vector = LoadFeedbackVectorUnchecked();
GotoIf(IsUndefined(feedback_vector), &if_undefined);
feedback_cell.Bind(LoadFeedbackVectorSlot(feedback_vector, slot));
Goto(&load_feedback_done);
BIND(&if_undefined);
{
feedback_cell.Bind(LoadRoot(RootIndex::kNoFeedbackCell));
Goto(&load_feedback_done);
}
BIND(&load_feedback_done);
Label if_fast(this), if_slow(this, Label::kDeferred);
Branch(IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags), &if_fast,
&if_slow);
BIND(&if_fast);
{
Node* result =
CallBuiltin(Builtins::kFastNewClosure, context, shared, feedback_cell);
Node* result = CallBuiltin(Builtins::kFastNewClosure, context, shared,
feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2583,8 +2595,8 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_newspace);
{
Node* result =
CallRuntime(Runtime::kNewClosure, context, shared, feedback_cell);
Node* result = CallRuntime(Runtime::kNewClosure, context, shared,
feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
@@ -2592,7 +2604,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
BIND(&if_oldspace);
{
Node* result = CallRuntime(Runtime::kNewClosure_Tenured, context, shared,
feedback_cell);
feedback_cell.value());
SetAccumulator(result);
Dispatch();
}
......
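
The handler change reduces to: when the caller's feedback vector is
undefined, substitute the canonical no-feedback cell instead of reading
a vector slot. A standalone model with plain pointers in place of CSA
nodes:

#include <cassert>

struct FeedbackCell {};
static const FeedbackCell kNoFeedbackCell;  // stands in for the heap root

struct FeedbackVector {
  const FeedbackCell* slots[8] = {};
};

const FeedbackCell* FeedbackCellForNewClosure(
    const FeedbackVector* maybe_vector, int slot) {
  if (maybe_vector == nullptr) {
    return &kNoFeedbackCell;  // LoadFeedbackVectorUnchecked() was undefined
  }
  return maybe_vector->slots[slot];
}

int main() {
  assert(FeedbackCellForNewClosure(nullptr, 0) == &kNoFeedbackCell);
}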
@@ -3536,6 +3536,8 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
ReadOnlyRoots roots = GetReadOnlyRoots();
os << "<FeedbackCell[";
if (map() == roots.no_feedback_cell_map()) {
os << "no feedback";
} else if (map() == roots.no_closures_cell_map()) {
os << "no closures";
} else if (map() == roots.one_closure_cell_map()) {
os << "one closure";
......
@@ -95,6 +95,7 @@ class RootVisitor;
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, no_closures_cell_map, NoClosuresCellMap) \
V(Map, no_feedback_cell_map, NoFeedbackCellMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
V(Map, one_closure_cell_map, OneClosureCellMap) \
V(Map, ordered_hash_map_map, OrderedHashMapMap) \
@@ -232,6 +233,7 @@ class RootVisitor;
/* Canonical empty values */ \
V(Script*, empty_script, EmptyScript) \
V(FeedbackCell*, many_closures_cell, ManyClosuresCell) \
V(FeedbackCell*, no_feedback_cell, NoFeedbackCell) \
V(Cell*, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
/* Protectors */ \
V(Cell*, array_constructor_protector, ArrayConstructorProtector) \
......
@@ -241,6 +241,8 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
DCHECK(function->shared()->is_compiled());
if (!function->shared()->IsInterpreted()) continue;
if (!function->has_feedback_vector()) continue;
MaybeOptimize(function, InterpretedFrame::cast(frame));
// TODO(leszeks): Move this increment to before the maybe optimize checks,
......
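
The profiler change is the same guard applied to tiering: a function
without a feedback vector is skipped outright, since tier-up decisions
need feedback to exist. Modeled as a predicate:

// Model of the new early-out in MarkCandidatesForOptimization.
struct Candidate {
  bool is_interpreted;
  bool has_feedback_vector;
};

bool ShouldConsiderForTierUp(const Candidate& c) {
  if (!c.is_interpreted) return false;
  if (!c.has_feedback_vector) return false;  // the added check
  return true;  // fall through to the MaybeOptimize heuristics
}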
@@ -154,16 +154,23 @@ Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
Handle<Object> value;
if (is_function) {
DCHECK(possibly_feedback_cell_slot->IsSmi());
// If a feedback vector was not allocated for this function, we don't have
// any information about the number of closures. Use the NoFeedbackCell to
// indicate that.
Handle<FeedbackCell> feedback_cell =
isolate->factory()->no_feedback_cell();
if (!feedback_vector.is_null()) {
DCHECK(possibly_feedback_cell_slot->IsSmi());
FeedbackSlot feedback_cells_slot(
Smi::ToInt(*possibly_feedback_cell_slot));
feedback_cell = Handle<FeedbackCell>(
FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)
->GetHeapObjectAssumeStrong()),
isolate);
}
// Copy the function and update its context. Use it as value.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(initial_value);
FeedbackSlot feedback_cells_slot(
Smi::ToInt(*possibly_feedback_cell_slot));
Handle<FeedbackCell> feedback_cell(
FeedbackCell::cast(feedback_vector->Get(feedback_cells_slot)
->GetHeapObjectAssumeStrong()),
isolate);
Handle<JSFunction> function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, context, feedback_cell, TENURED);
@@ -202,7 +209,11 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
CONVERT_SMI_ARG_CHECKED(flags, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
Handle<FeedbackVector> feedback_vector(closure->feedback_vector(), isolate);
Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>();
if (closure->has_feedback_vector()) {
feedback_vector =
Handle<FeedbackVector>(closure->feedback_vector(), isolate);
}
return DeclareGlobals(isolate, declarations, flags, feedback_vector);
}
......