Commit 804a612c authored by Mythri A, committed by Commit Bot

[turboprop] Add tiering up support for TurboProp

This CL adds support for tiering up in TurboProp. It makes the
changes necessary to support tier-up but does not tier up yet. More
specifically, this CL:
1. Introduces a new flag for interrupt_budget_for_midtier and
updates code to use the correct interrupt_budget.
2. Introduces a flag, turboprop_as_midtier, and the support necessary
to tier up. When this flag is enabled, we introduce checks for tier-up
and update the interrupt budget.


Bug: v8:9684
Change-Id: I58785ce4b9de46488a22d3b4d0cebedac460a773
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2460822
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70915}
parent 808a5d76
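For orientation, here is a minimal sketch of the tiering ladder this CL works
toward, paraphrasing the NextTier() change at the end of this diff
(NextTierSketch is an illustrative standalone function, not V8 API):

    // Illustrative only: a paraphrase of JSFunction::NextTier() below.
    // With --turboprop, TurboProp is the top tier; with the new
    // --turboprop-as-midtier, TurboProp code itself tiers up to TurboFan.
    CodeKind NextTierSketch(bool turboprop_as_midtier, CodeKind active_tier) {
      if (turboprop_as_midtier && active_tier == CodeKind::TURBOPROP) {
        return CodeKind::TURBOFAN;   // mid-tier TurboProp -> TurboFan
      }
      if (active_tier == CodeKind::INTERPRETED_FUNCTION) {
        return CodeKind::TURBOPROP;  // Ignition -> TurboProp (--turboprop on)
      }
      return CodeKind::TURBOFAN;
    }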
@@ -35,7 +35,7 @@ class BytecodeGraphBuilder {
   BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
                        NativeContextRef const& native_context,
                        SharedFunctionInfoRef const& shared_info,
-                       FeedbackVectorRef const& feedback_vector,
+                       FeedbackCellRef const& feedback_cell,
                        BailoutId osr_offset, JSGraph* jsgraph,
                        CallFrequency const& invocation_frequency,
                        SourcePositionTable* source_positions, int inlining_id,
@@ -67,6 +67,7 @@ class BytecodeGraphBuilder {
   bool native_context_independent() const {
     return CodeKindIsNativeContextIndependentJSFunction(code_kind_);
   }
+  bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
   bool generate_full_feedback_collection() const {
     // NCI code currently collects full feedback.
     DCHECK_IMPLIES(native_context_independent(),
@@ -117,8 +118,7 @@ class BytecodeGraphBuilder {
   // Checks the optimization marker and potentially triggers compilation or
   // installs the finished code object.
-  // Only relevant for specific code kinds (see
-  // CodeKindChecksOptimizationMarker).
+  // Only relevant for specific code kinds (see CodeKindCanTierUp).
   void MaybeBuildTierUpCheck();

   // Like bytecode, NCI code must collect call feedback to preserve proper
@@ -422,6 +422,7 @@ class BytecodeGraphBuilder {
   // The native context for which we optimize.
   NativeContextRef const native_context_;
   SharedFunctionInfoRef const shared_info_;
+  FeedbackCellRef const feedback_cell_;
   FeedbackVectorRef const feedback_vector_;
   CallFrequency const invocation_frequency_;
   JSTypeHintLowering const type_hint_lowering_;
@@ -987,7 +988,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
     JSHeapBroker* broker, Zone* local_zone,
     NativeContextRef const& native_context,
     SharedFunctionInfoRef const& shared_info,
-    FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
+    FeedbackCellRef const& feedback_cell, BailoutId osr_offset,
     JSGraph* jsgraph, CallFrequency const& invocation_frequency,
     SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
     BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
@@ -996,10 +997,11 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       jsgraph_(jsgraph),
       native_context_(native_context),
       shared_info_(shared_info),
-      feedback_vector_(feedback_vector),
+      feedback_cell_(feedback_cell),
+      feedback_vector_(feedback_cell.value().AsFeedbackVector()),
       invocation_frequency_(invocation_frequency),
       type_hint_lowering_(
-          broker, jsgraph, feedback_vector,
+          broker, jsgraph, feedback_vector_,
           (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
               ? JSTypeHintLowering::kBailoutOnUninitialized
               : JSTypeHintLowering::kNoFlags),
@@ -1053,6 +1055,8 @@ void BytecodeGraphBuilder::CreateFeedbackCellNode() {
   DCHECK_NULL(feedback_cell_node_);
   if (native_context_independent()) {
     feedback_cell_node_ = BuildLoadFeedbackCell();
+  } else if (is_turboprop()) {
+    feedback_cell_node_ = jsgraph()->Constant(feedback_cell_);
   }
 }
@@ -1116,7 +1120,10 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContext() {
 }

 void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
-  if (!CodeKindChecksOptimizationMarker(code_kind())) return;
+  // For OSR we don't tier up, so we don't need to build this check. Also,
+  // tiering up currently tail-calls to IET, and tail calls aren't supported
+  // with OSR. See AdjustStackPointerForTailCall.
+  if (!CodeKindCanTierUp(code_kind()) || osr_) return;

   int parameter_count = bytecode_array().parameter_count();
   Node* target = GetFunctionClosure();
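Conceptually, the check built here behaves like the following pseudocode at
function entry (a hedged sketch only; the real method emits TurboFan graph
nodes, and per the comments above the tier-up path tail-calls the
InterpreterEntryTrampoline (IET); TierUpCheckSketch and its helper are
hypothetical names):

    // Pseudocode sketch of the emitted tier-up check, not actual V8 code.
    void TierUpCheckSketch(FeedbackVector vector) {
      // "Checks the optimization marker and potentially triggers compilation
      // or installs the finished code object" (per the header comment above).
      if (vector.optimization_marker() != OptimizationMarker::kNone) {
        TailCallToInterpreterEntryTrampoline();  // hypothetical helper
      }
    }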
@@ -4092,13 +4099,13 @@ void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
 }

 void BytecodeGraphBuilder::BuildUpdateInterruptBudget(int delta) {
-  if (native_context_independent()) {
-    // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
-    int delta_with_current_bytecode =
-        delta - bytecode_iterator().current_bytecode_size();
-    NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
-            feedback_cell_node());
-  }
+  if (!CodeKindCanTierUp(code_kind())) return;
+
+  // Keep uses of this in sync with Ignition's UpdateInterruptBudget.
+  int delta_with_current_bytecode =
+      delta - bytecode_iterator().current_bytecode_size();
+  NewNode(simplified()->UpdateInterruptBudget(delta_with_current_bytecode),
+          feedback_cell_node());
 }

 JSTypeHintLowering::LoweringResult
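The update charges the jump's delta minus the size of the current bytecode,
mirroring Ignition's accounting. A worked example with assumed numbers (all
values illustrative, not taken from the patch):

    // Assumed: a backward jump with delta -120 encoded as a 3-byte bytecode.
    int delta = -120;
    int current_bytecode_size = 3;
    int delta_with_current_bytecode =
        delta - current_bytecode_size;  // -123
    // The interrupt budget in the feedback cell is adjusted by -123; once the
    // budget is exhausted, a budget interrupt gives the runtime profiler a
    // chance to mark the function for tier-up.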
@@ -4493,17 +4500,18 @@ void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
 void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
                             SharedFunctionInfoRef const& shared_info,
-                            FeedbackVectorRef const& feedback_vector,
+                            FeedbackCellRef const& feedback_cell,
                             BailoutId osr_offset, JSGraph* jsgraph,
                             CallFrequency const& invocation_frequency,
                             SourcePositionTable* source_positions,
                             int inlining_id, CodeKind code_kind,
                             BytecodeGraphBuilderFlags flags,
                             TickCounter* tick_counter) {
-  DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
+  DCHECK(broker->IsSerializedForCompilation(
+      shared_info, feedback_cell.value().AsFeedbackVector()));
   BytecodeGraphBuilder builder(
       broker, local_zone, broker->target_native_context(), shared_info,
-      feedback_vector, osr_offset, jsgraph, invocation_frequency,
+      feedback_cell, osr_offset, jsgraph, invocation_frequency,
       source_positions, inlining_id, code_kind, flags, tick_counter);
   builder.CreateGraph();
 }
@@ -41,7 +41,7 @@ using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
 // on AIX (v8:8193).
 void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
                             SharedFunctionInfoRef const& shared_info,
-                            FeedbackVectorRef const& feedback_vector,
+                            FeedbackCellRef const& feedback_cell,
                             BailoutId osr_offset, JSGraph* jsgraph,
                             CallFrequency const& invocation_frequency,
                             SourcePositionTable* source_positions,
@@ -350,6 +350,7 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
   NativeContextRef native_context() const;
   SharedFunctionInfoRef shared() const;
   FeedbackVectorRef feedback_vector() const;
+  FeedbackCellRef raw_feedback_cell() const;
   CodeRef code() const;
   int InitialMapInstanceSizeWithMinSlack() const;
 };
@@ -664,6 +664,7 @@ class JSFunctionData : public JSObjectData {
   ObjectData* initial_map() const { return initial_map_; }
   ObjectData* prototype() const { return prototype_; }
   ObjectData* shared() const { return shared_; }
+  ObjectData* raw_feedback_cell() const { return feedback_cell_; }
   ObjectData* feedback_vector() const { return feedback_vector_; }
   ObjectData* code() const { return code_; }
   int initial_map_instance_size_with_min_slack() const {
@@ -686,6 +687,7 @@
   ObjectData* prototype_ = nullptr;
   ObjectData* shared_ = nullptr;
   ObjectData* feedback_vector_ = nullptr;
+  ObjectData* feedback_cell_ = nullptr;
   ObjectData* code_ = nullptr;
   int initial_map_instance_size_with_min_slack_;
 };
@@ -1331,12 +1333,14 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
   DCHECK_NULL(initial_map_);
   DCHECK_NULL(prototype_);
   DCHECK_NULL(shared_);
+  DCHECK_NULL(feedback_cell_);
   DCHECK_NULL(feedback_vector_);
   DCHECK_NULL(code_);

   context_ = broker->GetOrCreateData(function->context());
   native_context_ = broker->GetOrCreateData(function->native_context());
   shared_ = broker->GetOrCreateData(function->shared());
+  feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
   feedback_vector_ = has_feedback_vector()
                          ? broker->GetOrCreateData(function->feedback_vector())
                          : nullptr;
@@ -3427,6 +3431,7 @@ BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
 BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
 BIMODAL_ACCESSOR(JSFunction, Object, prototype)
 BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
 BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
 BIMODAL_ACCESSOR(JSFunction, Code, code)
@@ -332,8 +332,8 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
 // following static information is provided:
 // - context : The context (as SSA value) bound by the call target.
 // - feedback_vector : The target is guaranteed to use this feedback vector.
-FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
-                                                  Node** context_out) {
+FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
+                                                Node** context_out) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
   Node* target = node->InputAt(JSCallOrConstructNode::TargetIndex());
   HeapObjectMatcher match(target);
@@ -345,7 +345,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
     // The inlinee specializes to the context from the JSFunction object.
     *context_out = jsgraph()->Constant(function.context());
-    return function.feedback_vector();
+    return function.raw_feedback_cell();
   }

   if (match.IsJSCreateClosure()) {
@@ -356,7 +356,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
     // The inlinee uses the locally provided context at instantiation.
     *context_out = NodeProperties::GetContextInput(match.node());
-    return cell.value().AsFeedbackVector();
+    return cell;
   } else if (match.IsCheckClosure()) {
     FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
@@ -367,7 +367,7 @@ FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
                      match.node(), effect, control);
     NodeProperties::ReplaceEffectInput(node, effect);
-    return cell.value().AsFeedbackVector();
+    return cell;
   }

   // Must succeed.
@@ -438,8 +438,9 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
                    : ""));

   // Determine the target's feedback vector and its context.
   Node* context;
-  FeedbackVectorRef feedback_vector = DetermineCallContext(node, &context);
-  CHECK(broker()->IsSerializedForCompilation(*shared_info, feedback_vector));
+  FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
+  CHECK(broker()->IsSerializedForCompilation(
+      *shared_info, feedback_cell.value().AsFeedbackVector()));

   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function.
@@ -468,7 +469,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
   }

   {
     CallFrequency frequency = call.frequency();
-    BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
+    BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_cell,
                            BailoutId::None(), jsgraph(), frequency,
                            source_positions_, inlining_id, info_->code_kind(),
                            flags, &info_->tick_counter());
@@ -59,7 +59,7 @@ class JSInliner final : public AdvancedReducer {
   SourcePositionTable* const source_positions_;

   base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
-  FeedbackVectorRef DetermineCallContext(Node* node, Node** context_out);
+  FeedbackCellRef DetermineCallContext(Node* node, Node** context_out);

   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                    int parameter_count, BailoutId bailout_id,
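The inliner-facing change, in short: DetermineCallContext now hands back the
FeedbackCellRef rather than the FeedbackVectorRef, so BuildGraphFromBytecode
can derive the vector itself via feedback_cell.value().AsFeedbackVector() and
keep the cell around for the interrupt-budget updates shown earlier.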
@@ -1406,10 +1406,11 @@ struct GraphBuilderPhase {
     JSFunctionRef closure(data->broker(), data->info()->closure());
     CallFrequency frequency(1.0f);
     BuildGraphFromBytecode(
-        data->broker(), temp_zone, closure.shared(), closure.feedback_vector(),
-        data->info()->osr_offset(), data->jsgraph(), frequency,
-        data->source_positions(), SourcePosition::kNotInlined,
-        data->info()->code_kind(), flags, &data->info()->tick_counter());
+        data->broker(), temp_zone, closure.shared(),
+        closure.raw_feedback_cell(), data->info()->osr_offset(),
+        data->jsgraph(), frequency, data->source_positions(),
+        SourcePosition::kNotInlined, data->info()->code_kind(), flags,
+        &data->info()->tick_counter());
   }
 };
@@ -92,8 +92,20 @@ void TraceHeuristicOptimizationDisallowed(JSFunction function) {
   }
 }

+// TODO(jgruber): Remove this once we include this tracing with --trace-opt.
+void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
+  if (FLAG_trace_turbo_nci) {
+    StdoutStream os;
+    os << "NCI tierup mark: " << Brief(function) << ", "
+       << OptimizationReasonToString(reason) << std::endl;
+  }
+}
+
 void TraceRecompile(JSFunction function, OptimizationReason reason,
-                    Isolate* isolate) {
+                    CodeKind code_kind, Isolate* isolate) {
+  if (code_kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+    TraceNCIRecompile(function, reason);
+  }
   if (FLAG_trace_opt) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(), "[marking ");
@@ -104,22 +116,15 @@ void TraceRecompile(JSFunction function, OptimizationReason reason,
   }
 }

-void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
-  if (FLAG_trace_turbo_nci) {
-    StdoutStream os;
-    os << "NCI tierup mark: " << Brief(function) << ", "
-       << OptimizationReasonToString(reason) << std::endl;
-  }
-}
-
 }  // namespace

 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
     : isolate_(isolate), any_ic_changed_(false) {}

-void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
+void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason,
+                               CodeKind code_kind) {
   DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
-  TraceRecompile(function, reason, isolate_);
+  TraceRecompile(function, reason, code_kind, isolate_);
   function.MarkForOptimization(ConcurrencyMode::kConcurrent);
 }
@@ -150,43 +155,15 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
       Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
 }

-void RuntimeProfiler::MaybeOptimizeInterpretedFrame(JSFunction function,
-                                                    InterpretedFrame* frame) {
+void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
+                                         JavaScriptFrame* frame,
+                                         CodeKind code_kind) {
+  DCHECK(CodeKindCanTierUp(code_kind));
   if (function.IsInOptimizationQueue()) {
     TraceInOptimizationQueue(function);
     return;
   }

-  if (FLAG_testing_d8_test_runner &&
-      !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
-                                                                function)) {
-    TraceHeuristicOptimizationDisallowed(function);
-    return;
-  }
-
-  if (function.shared().optimization_disabled()) return;
-
-  if (FLAG_always_osr) {
-    AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
-    // Fall through and do a normal optimized compile as well.
-  } else if (MaybeOSR(function, frame)) {
-    return;
-  }
-
-  OptimizationReason reason =
-      ShouldOptimize(function, function.shared().GetBytecodeArray());
-
-  if (reason != OptimizationReason::kDoNotOptimize) {
-    Optimize(function, reason);
-  }
-}
-
-void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
-  DCHECK_EQ(function.code().kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
-
-  if (function.IsInOptimizationQueue()) {
-    TraceInOptimizationQueue(function);
-    return;
-  }
-
   if (FLAG_testing_d8_test_runner &&
       !PendingOptimizationTable::IsHeuristicOptimizationAllowed(isolate_,
                                                                 function)) {
@@ -196,15 +173,24 @@ void RuntimeProfiler::MaybeOptimizeNCIFrame(JSFunction function) {
   if (function.shared().optimization_disabled()) return;

-  // Note: We currently do not trigger OSR compilation from NCI code.
+  // Note: We currently do not trigger OSR compilation from NCI or TP code.
   // TODO(jgruber,v8:8888): But we should.

+  if (frame->is_interpreted()) {
+    DCHECK_EQ(code_kind, CodeKind::INTERPRETED_FUNCTION);
+    if (FLAG_always_osr) {
+      AttemptOnStackReplacement(InterpretedFrame::cast(frame),
+                                AbstractCode::kMaxLoopNestingMarker);
+      // Fall through and do a normal optimized compile as well.
+    } else if (MaybeOSR(function, InterpretedFrame::cast(frame))) {
+      return;
+    }
+  }
+
   OptimizationReason reason =
       ShouldOptimize(function, function.shared().GetBytecodeArray());

   if (reason != OptimizationReason::kDoNotOptimize) {
-    TraceNCIRecompile(function, reason);
-    Optimize(function, reason);
+    Optimize(function, reason, code_kind);
   }
 }
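In short, the former MaybeOptimizeInterpretedFrame/MaybeOptimizeNCIFrame pair
is merged into a single MaybeOptimizeFrame that takes the frame's CodeKind:
OSR is still attempted only for interpreted frames, while NCI and TurboProp
frames go straight to the ShouldOptimize decision.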
@@ -297,7 +283,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromBytecode() {
     if (!function.has_feedback_vector()) continue;

-    MaybeOptimizeInterpretedFrame(function, InterpretedFrame::cast(frame));
+    MaybeOptimizeFrame(function, frame, CodeKind::INTERPRETED_FUNCTION);

     // TODO(leszeks): Move this increment to before the maybe optimize checks,
     // and update the tests to assume the increment has already happened.
@@ -315,7 +301,8 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
     if (!frame->is_optimized()) continue;

     JSFunction function = frame->function();
-    if (function.code().kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
+    auto code_kind = function.code().kind();
+    if (!CodeKindIsOptimizedAndCanTierUp(code_kind)) {
       continue;
     }
@@ -324,7 +311,7 @@ void RuntimeProfiler::MarkCandidatesForOptimizationFromCode() {
     function.feedback_vector().SaturatingIncrementProfilerTicks();

-    MaybeOptimizeNCIFrame(function);
+    MaybeOptimizeFrame(function, frame, code_kind);
   }
 }
@@ -15,7 +15,9 @@ namespace internal {
 class BytecodeArray;
 class Isolate;
 class InterpretedFrame;
+class JavaScriptFrame;
 class JSFunction;
+enum class CodeKind;
 enum class OptimizationReason : uint8_t;

 class RuntimeProfiler {
@@ -35,15 +37,16 @@ class RuntimeProfiler {
  private:
   // Make the decision whether to optimize the given function, and mark it for
   // optimization if the decision was 'yes'.
-  void MaybeOptimizeNCIFrame(JSFunction function);
-  void MaybeOptimizeInterpretedFrame(JSFunction function,
-                                     InterpretedFrame* frame);
+  void MaybeOptimizeFrame(JSFunction function, JavaScriptFrame* frame,
+                          CodeKind code_kind);

   // Potentially attempts OSR from and returns whether no other
   // optimization attempts should be made.
   bool MaybeOSR(JSFunction function, InterpretedFrame* frame);

   OptimizationReason ShouldOptimize(JSFunction function,
                                     BytecodeArray bytecode_array);
-  void Optimize(JSFunction function, OptimizationReason reason);
+  void Optimize(JSFunction function, OptimizationReason reason,
+                CodeKind code_kind);
   void Baseline(JSFunction function, OptimizationReason reason);

   class MarkCandidatesForOptimizationScope final {
@@ -559,6 +559,9 @@ DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
 DEFINE_BOOL(turboprop_dynamic_map_checks, true,
             "use dynamic map checks when generating code for property accesses "
             "if all handlers in an IC are the same for turboprop")
+DEFINE_BOOL(turboprop_as_midtier, false,
+            "enable experimental turboprop mid-tier compiler")
+DEFINE_IMPLICATION(turboprop_as_midtier, turboprop)
 DEFINE_NEG_IMPLICATION(turboprop, turbo_inlining)
 DEFINE_IMPLICATION(turboprop, concurrent_inlining)
 DEFINE_VALUE_IMPLICATION(turboprop, interrupt_budget, 15 * KB)
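Given the DEFINE_IMPLICATION above, enabling the experimental flag also turns
on --turboprop itself. A usage sketch on a local d8 build (output path
assumed); note that per the commit message this CL only wires up support and
does not yet actually tier up:

    out/x64.release/d8 --turboprop_as_midtier test.js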
@@ -356,7 +356,7 @@ inline bool Code::checks_optimization_marker() const {
   bool checks_marker =
       (builtin_index() == Builtins::kCompileLazy ||
        builtin_index() == Builtins::kInterpreterEntryTrampoline ||
-       CodeKindChecksOptimizationMarker(kind()));
+       CodeKindCanTierUp(kind()));
   return checks_marker ||
          (CodeKindCanDeoptimize(kind()) && marked_for_deoptimization());
 }
@@ -74,9 +74,14 @@ inline constexpr bool CodeKindCanOSR(CodeKind kind) {
   return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
 }

-inline constexpr bool CodeKindChecksOptimizationMarker(CodeKind kind) {
+inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
+  return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
+         (FLAG_turboprop_as_midtier && kind == CodeKind::TURBOPROP);
+}
+
+inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
   return kind == CodeKind::INTERPRETED_FUNCTION ||
-         kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+         CodeKindIsOptimizedAndCanTierUp(kind);
 }

 // The optimization marker field on the feedback vector has a dual purpose of
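As a summary of the two predicates above (this table is a paraphrase of the
code, not part of the patch), with --turboprop_as_midtier enabled:

    kind                         IsOptimizedAndCanTierUp   CanTierUp
    INTERPRETED_FUNCTION         false                     true
    NATIVE_CONTEXT_INDEPENDENT   true                      true
    TURBOPROP                    true                      true
    TURBOFAN                     false                     false

Without the flag, TURBOPROP reports false for both, i.e. it stays a top tier.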
@@ -71,7 +71,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
     mode = ConcurrencyMode::kNotConcurrent;
   }

-  DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI());
+  DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
+         (ActiveTierIsTurboprop() && FLAG_turboprop_as_midtier));
   DCHECK(!ActiveTierIsTurbofan());
   DCHECK(shared().IsInterpreted());
   DCHECK(shared().allows_lazy_compilation() ||
@@ -140,7 +140,10 @@ bool JSFunction::ActiveTierIsTurboprop() const {
 CodeKind JSFunction::NextTier() const {
   if (V8_UNLIKELY(FLAG_turbo_nci_as_midtier && ActiveTierIsIgnition())) {
     return CodeKind::NATIVE_CONTEXT_INDEPENDENT;
-  } else if (V8_UNLIKELY(FLAG_turboprop)) {
+  } else if (V8_UNLIKELY(FLAG_turboprop_as_midtier &&
+                         ActiveTierIsTurboprop())) {
+    return CodeKind::TURBOFAN;
+  } else if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsIgnition()) {
     return CodeKind::TURBOPROP;
   }
   return CodeKind::TURBOFAN;
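Two things to note in this hunk: with --turboprop_as_midtier, already-active
TurboProp code now reports TURBOFAN as its next tier, and the new
ActiveTierIsIgnition() guard on the --turboprop branch keeps non-Ignition
code from re-requesting TURBOPROP.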