Commit be286bfa authored by Georg Neis, committed by Commit Bot

[turbofan] Make broker own the is_concurrent_inlining flag

... and consult it there from the various reducers. The flag makes no
sense without the broker, and the reducers already have access to the
broker, so we can avoid an additional flag per reducer.

Bug: v8:7790
Change-Id: I448050a55951b94d5313c1a79a502be906b98b25
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2013108
Auto-Submit: Georg Neis <neis@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65918}
parent a2b902dd
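In outline, the refactoring replaces a per-reducer copy of the flag with a query on the broker. Below is a minimal, self-contained C++ sketch of that pattern; MyBroker and MyReducer are hypothetical stand-ins for illustration only, not the actual V8 classes.

// Sketch of the pattern, with hypothetical classes.
// Before: every reducer carried its own copy of the concurrent-inlining flag.
// After: the broker owns the flag and reducers query it on demand.
#include <iostream>

class MyBroker {
 public:
  explicit MyBroker(bool is_concurrent_inlining)
      : is_concurrent_inlining_(is_concurrent_inlining) {}
  bool is_concurrent_inlining() const { return is_concurrent_inlining_; }

 private:
  const bool is_concurrent_inlining_;
};

class MyReducer {
 public:
  explicit MyReducer(MyBroker* broker) : broker_(broker) {}

  // Reducers already hold a broker pointer, so no extra flag member or
  // constructor parameter is needed.
  bool should_disallow_heap_access() const {
    return broker_->is_concurrent_inlining();
  }

 private:
  MyBroker* const broker_;
};

int main() {
  MyBroker broker(/*is_concurrent_inlining=*/true);
  MyReducer reducer(&broker);
  std::cout << std::boolalpha << reducer.should_disallow_heap_access() << "\n";
  return 0;
}

The flag is then threaded through a single place (broker construction) instead of through every reducer constructor, which is the point of the change.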
@@ -865,13 +865,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
 // tolerate the lack of a script without bytecode.
 DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
 std::unique_ptr<OptimizedCompilationJob> job(
-compiler::Pipeline::NewCompilationJob(
-isolate, function, has_script,
-FLAG_concurrent_inlining && osr_offset.IsNone()));
+compiler::Pipeline::NewCompilationJob(isolate, function, has_script,
+osr_offset, osr_frame));
 OptimizedCompilationInfo* compilation_info = job->compilation_info();
-compilation_info->SetOptimizingForOsr(osr_offset, osr_frame);
 // Do not use TurboFan if we need to be able to set break points.
 if (compilation_info->shared_info()->HasBreakInfo()) {
 compilation_info->AbortOptimization(BailoutReason::kFunctionBeingDebugged);
...
@@ -40,7 +40,6 @@ class BytecodeGraphBuilder {
 CallFrequency const& invocation_frequency,
 SourcePositionTable* source_positions, int inlining_id,
 BytecodeGraphBuilderFlags flags,
-JSTypeHintLowering::Flags type_hint_lowering_flags,
 TickCounter* tick_counter);
 // Creates a graph by visiting bytecodes.
@@ -371,7 +370,7 @@ class BytecodeGraphBuilder {
 SharedFunctionInfoRef shared_info() const { return shared_info_; }
 bool should_disallow_heap_access() const {
-return flags_ & BytecodeGraphBuilderFlag::kConcurrentInlining;
+return broker_->is_concurrent_inlining();
 }
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
@@ -438,8 +437,6 @@ class BytecodeGraphBuilder {
 SourcePosition const start_position_;
-BytecodeGraphBuilderFlags const flags_;
 TickCounter* const tick_counter_;
 static int const kBinaryOperationHintIndex = 1;
@@ -947,9 +944,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
 FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
 JSGraph* jsgraph, CallFrequency const& invocation_frequency,
 SourcePositionTable* source_positions, int inlining_id,
-BytecodeGraphBuilderFlags flags,
-JSTypeHintLowering::Flags type_hint_lowering_flags,
-TickCounter* tick_counter)
+BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
 : broker_(broker),
 local_zone_(local_zone),
 jsgraph_(jsgraph),
@@ -957,8 +952,11 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
 shared_info_(shared_info),
 feedback_vector_(feedback_vector),
 invocation_frequency_(invocation_frequency),
-type_hint_lowering_(broker, jsgraph, feedback_vector,
-type_hint_lowering_flags),
+type_hint_lowering_(
+broker, jsgraph, feedback_vector,
+(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
+? JSTypeHintLowering::kBailoutOnUninitialized
+: JSTypeHintLowering::kNoFlags),
 frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
 FrameStateType::kInterpretedFunction,
 bytecode_array().parameter_count(), bytecode_array().register_count(),
@@ -968,7 +966,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
 bytecode_analysis_(broker_->GetBytecodeAnalysis(
 bytecode_array().object(), osr_offset,
 flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
-(flags & BytecodeGraphBuilderFlag::kConcurrentInlining)
+should_disallow_heap_access()
 ? SerializationPolicy::kAssumeSerialized
 : SerializationPolicy::kSerializeIfNeeded)),
 environment_(nullptr),
@@ -987,9 +985,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
 state_values_cache_(jsgraph),
 source_positions_(source_positions),
 start_position_(shared_info.StartPosition(), inlining_id),
-flags_(flags),
 tick_counter_(tick_counter) {
-if (flags & BytecodeGraphBuilderFlag::kConcurrentInlining) {
+if (should_disallow_heap_access()) {
 // With concurrent inlining on, the source position address doesn't change
 // because it's been copied from the heap.
 source_position_iterator_ = std::make_unique<SourcePositionTableIterator>(
@@ -4168,17 +4165,10 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
 int inlining_id, BytecodeGraphBuilderFlags flags,
 TickCounter* tick_counter) {
 DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
-JSTypeHintLowering::Flags type_hint_lowering_flags =
-JSTypeHintLowering::kNoFlags;
-if (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized) {
-type_hint_lowering_flags |= JSTypeHintLowering::kBailoutOnUninitialized;
-}
 BytecodeGraphBuilder builder(
 broker, local_zone, broker->target_native_context(), shared_info,
 feedback_vector, osr_offset, jsgraph, invocation_frequency,
-source_positions, inlining_id, flags, type_hint_lowering_flags,
-tick_counter);
+source_positions, inlining_id, flags, tick_counter);
 builder.CreateGraph();
 }
...
@@ -33,7 +33,6 @@ enum class BytecodeGraphBuilderFlag : uint8_t {
 // bytecode analysis.
 kAnalyzeEnvironmentLiveness = 1 << 1,
 kBailoutOnUninitialized = 1 << 2,
-kConcurrentInlining = 1 << 3,
 };
 using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
...
@@ -2040,6 +2040,10 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
 #undef _
+bool JSCallReducer::should_disallow_heap_access() const {
+return broker_->is_concurrent_inlining();
+}
 Reduction JSCallReducer::ReplaceWithSubgraph(JSCallReducerAssembler* gasm,
 Node* subgraph) {
 // TODO(jgruber): Consider a less fiddly way of integrating the new subgraph
...
@@ -43,7 +43,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
 enum Flag {
 kNoFlags = 0u,
 kBailoutOnUninitialized = 1u << 0,
-kConcurrentInlining = 1u << 1
 };
 using Flags = base::Flags<Flag>;
@@ -230,10 +229,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
 SimplifiedOperatorBuilder* simplified() const;
 Flags flags() const { return flags_; }
 CompilationDependencies* dependencies() const { return dependencies_; }
-bool should_disallow_heap_access() const {
-return flags() & kConcurrentInlining;
-}
+bool should_disallow_heap_access() const;
 JSGraph* const jsgraph_;
 JSHeapBroker* const broker_;
...
@@ -4602,7 +4602,7 @@ FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
 }
 bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
-return (is_concurrent_inlining_)
+return is_concurrent_inlining_
 ? GetFeedback(source).IsInsufficient()
 : FeedbackNexus(source.vector, source.slot).IsUninitialized();
 }
@@ -4801,8 +4801,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
 BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
 FeedbackSource const& source) {
 ProcessedFeedback const& feedback =
-(is_concurrent_inlining_) ? GetFeedback(source)
+is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForBinaryOperation(source);
 return feedback.IsInsufficient() ? BinaryOperationHint::kNone
 : feedback.AsBinaryOperation().value();
 }
@@ -4810,14 +4810,14 @@ BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
 CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
 FeedbackSource const& source) {
 ProcessedFeedback const& feedback =
-(is_concurrent_inlining_) ? GetFeedback(source)
+is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForCompareOperation(source);
 return feedback.IsInsufficient() ? CompareOperationHint::kNone
 : feedback.AsCompareOperation().value();
 }
 ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
-ProcessedFeedback const& feedback = (is_concurrent_inlining_)
+ProcessedFeedback const& feedback = is_concurrent_inlining_
 ? GetFeedback(source)
 : ProcessFeedbackForForIn(source);
 return feedback.IsInsufficient() ? ForInHint::kNone
@@ -4827,46 +4827,46 @@ ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
 FeedbackSource const& source, AccessMode mode,
 base::Optional<NameRef> static_name) {
-return (is_concurrent_inlining_)
+return is_concurrent_inlining_
 ? GetFeedback(source)
 : ProcessFeedbackForPropertyAccess(source, mode, static_name);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_) ? GetFeedback(source)
+return is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForInstanceOf(source);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_) ? GetFeedback(source)
+return is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForCall(source);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_) ? GetFeedback(source)
+return is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForGlobalAccess(source);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_)
+return is_concurrent_inlining_
 ? GetFeedback(source)
 : ProcessFeedbackForArrayOrObjectLiteral(source);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_) ? GetFeedback(source)
+return is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForRegExpLiteral(source);
 }
 ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
 FeedbackSource const& source) {
-return (is_concurrent_inlining_) ? GetFeedback(source)
+return is_concurrent_inlining_ ? GetFeedback(source)
 : ProcessFeedbackForTemplateObject(source);
 }
 ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
...
@@ -88,6 +88,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
 Isolate* isolate() const { return isolate_; }
 Zone* zone() const { return zone_; }
 bool tracing_enabled() const { return tracing_enabled_; }
+bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
 enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
 BrokerMode mode() const { return mode_; }
...
@@ -127,7 +127,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
 }
 Reduction JSInliningHeuristic::Reduce(Node* node) {
-DisallowHeapAccessIf no_heap_acess(info_->is_concurrent_inlining());
+DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
 if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
@@ -222,7 +222,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
 }
 void JSInliningHeuristic::Finalize() {
-DisallowHeapAccessIf no_heap_acess(info_->is_concurrent_inlining());
+DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
 if (candidates_.empty()) return;  // Nothing to do without candidates.
 if (FLAG_trace_turbo_inlining) PrintCandidates();
...
@@ -22,7 +22,6 @@ class JSInliningHeuristic final : public AdvancedReducer {
 candidates_(local_zone),
 seen_(local_zone),
 source_positions_(source_positions),
-info_(info),
 jsgraph_(jsgraph),
 broker_(broker) {}
@@ -93,7 +92,6 @@ class JSInliningHeuristic final : public AdvancedReducer {
 Candidates candidates_;
 ZoneSet<NodeId> seen_;
 SourcePositionTable* source_positions_;
-OptimizedCompilationInfo* info_;
 JSGraph* const jsgraph_;
 JSHeapBroker* const broker_;
 int total_inlined_bytecode_size_ = 0;
...
@@ -419,7 +419,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
 // always hold true.
 CHECK(shared_info->is_compiled());
-if (!info_->is_concurrent_inlining() &&
+if (!broker()->is_concurrent_inlining() &&
 info_->is_source_positions_enabled()) {
 SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
 shared_info->object());
@@ -458,9 +458,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
 if (info_->is_bailout_on_uninitialized()) {
 flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
 }
-if (info_->is_concurrent_inlining()) {
-flags |= BytecodeGraphBuilderFlag::kConcurrentInlining;
-}
 {
 CallFrequency frequency = call.frequency();
 BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
...
@@ -9,6 +9,7 @@
 #include "src/codegen/code-factory.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
@@ -22,14 +23,11 @@ namespace internal {
 namespace compiler {
 JSIntrinsicLowering::JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph,
-JSHeapBroker* broker, Flags flags)
-: AdvancedReducer(editor),
-jsgraph_(jsgraph),
-broker_(broker),
-flags_(flags) {}
+JSHeapBroker* broker)
+: AdvancedReducer(editor), jsgraph_(jsgraph), broker_(broker) {}
 Reduction JSIntrinsicLowering::Reduce(Node* node) {
-DisallowHeapAccessIf no_heap_access(flags_ & kConcurrentInlining);
+DisallowHeapAccessIf no_heap_access(broker()->is_concurrent_inlining());
 if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
 const Runtime::Function* const f =
...
@@ -31,12 +31,7 @@ class SimplifiedOperatorBuilder;
 class V8_EXPORT_PRIVATE JSIntrinsicLowering final
 : public NON_EXPORTED_BASE(AdvancedReducer) {
 public:
-// Flags that control the mode of operation.
-enum Flag { kNoFlags = 0u, kConcurrentInlining = 1u << 0 };
-using Flags = base::Flags<Flag>;
-JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker,
-Flags flags);
+JSIntrinsicLowering(Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker);
 ~JSIntrinsicLowering() final = default;
 const char* reducer_name() const override { return "JSIntrinsicLowering"; }
@@ -95,7 +90,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
 JSGraph* const jsgraph_;
 JSHeapBroker* const broker_;
-Flags const flags_;
 };
 }  // namespace compiler
...
@@ -53,6 +53,10 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
 }  // namespace
+bool JSNativeContextSpecialization::should_disallow_heap_access() const {
+return broker()->is_concurrent_inlining();
+}
 JSNativeContextSpecialization::JSNativeContextSpecialization(
 Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
 CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
...
@@ -47,7 +47,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
 enum Flag {
 kNoFlags = 0u,
 kBailoutOnUninitialized = 1u << 0,
-kConcurrentInlining = 1u << 1
 };
 using Flags = base::Flags<Flag>;
@@ -251,10 +250,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
 CompilationDependencies* dependencies() const { return dependencies_; }
 Zone* zone() const { return zone_; }
 Zone* shared_zone() const { return shared_zone_; }
-bool should_disallow_heap_access() const {
-return flags() & kConcurrentInlining;
-}
+bool should_disallow_heap_access() const;
 JSGraph* const jsgraph_;
 JSHeapBroker* const broker_;
...
@@ -978,8 +978,8 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
 public:
 PipelineCompilationJob(Isolate* isolate,
 Handle<SharedFunctionInfo> shared_info,
-Handle<JSFunction> function,
-bool is_concurrent_inlining);
+Handle<JSFunction> function, BailoutId osr_offset,
+JavaScriptFrame* osr_frame);
 ~PipelineCompilationJob() final;
 protected:
@@ -1004,7 +1004,8 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
 PipelineCompilationJob::PipelineCompilationJob(
 Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
-Handle<JSFunction> function, bool is_concurrent_inlining)
+Handle<JSFunction> function, BailoutId osr_offset,
+JavaScriptFrame* osr_frame)
 // Note that the OptimizedCompilationInfo is not initialized at the time
 // we pass it to the CompilationJob constructor, but it is not
 // dereferenced there.
@@ -1017,18 +1018,20 @@ PipelineCompilationJob::PipelineCompilationJob(
 handle(Script::cast(shared_info->script()), isolate),
 compilation_info(), function->GetIsolate(), &zone_stats_)),
 data_(&zone_stats_, function->GetIsolate(), compilation_info(),
-pipeline_statistics_.get(), is_concurrent_inlining),
+pipeline_statistics_.get(),
+FLAG_concurrent_inlining && osr_offset.IsNone()),
 pipeline_(&data_),
-linkage_(nullptr) {}
-PipelineCompilationJob::~PipelineCompilationJob() {
-}
+linkage_(nullptr) {
+compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
+}
+PipelineCompilationJob::~PipelineCompilationJob() {}
 namespace {
-// Ensure that the RuntimeStats table is set on the PipelineData for duration of
-// the job phase and unset immediately afterwards. Each job needs to set the
-// correct RuntimeCallStats table depending on whether it is running on a
-// background or foreground thread.
+// Ensure that the RuntimeStats table is set on the PipelineData for
+// duration of the job phase and unset immediately afterwards. Each job
+// needs to set the correct RuntimeCallStats table depending on whether it
+// is running on a background or foreground thread.
 class PipelineJobScope {
 public:
 PipelineJobScope(PipelineData* data, RuntimeCallStats* stats) : data_(data) {
@@ -1062,9 +1065,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
 if (FLAG_turbo_inlining) {
 compilation_info()->MarkAsInliningEnabled();
 }
-if (FLAG_concurrent_inlining && !compilation_info()->is_osr()) {
-compilation_info()->MarkAsConcurrentInlining();
-}
 // This is the bottleneck for computing and setting poisoning level in the
 // optimizing compiler.
@@ -1113,7 +1113,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
 pipeline_.Serialize();
-if (!compilation_info()->is_concurrent_inlining()) {
+if (!data_.broker()->is_concurrent_inlining()) {
 if (!pipeline_.CreateGraph()) {
 CHECK(!isolate->has_pending_exception());
 return AbortOptimization(BailoutReason::kGraphBuildingFailed);
@@ -1128,7 +1128,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
 // Ensure that the RuntimeCallStats table is only available during execution
 // and not during finalization as that might be on a different thread.
 PipelineJobScope scope(&data_, stats);
-if (compilation_info()->is_concurrent_inlining()) {
+if (data_.broker()->is_concurrent_inlining()) {
 if (!pipeline_.CreateGraph()) {
 return AbortOptimization(BailoutReason::kGraphBuildingFailed);
 }
@@ -1344,9 +1344,6 @@ struct GraphBuilderPhase {
 if (data->info()->is_bailout_on_uninitialized()) {
 flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
 }
-if (data->info()->is_concurrent_inlining()) {
-flags |= BytecodeGraphBuilderFlag::kConcurrentInlining;
-}
 JSFunctionRef closure(data->broker(), data->info()->closure());
 CallFrequency frequency(1.0f);
@@ -1375,9 +1372,6 @@ struct InliningPhase {
 if (data->info()->is_bailout_on_uninitialized()) {
 call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
 }
-if (data->info()->is_concurrent_inlining()) {
-call_reducer_flags |= JSCallReducer::kConcurrentInlining;
-}
 JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
 temp_zone, call_reducer_flags,
 data->dependencies());
@@ -1392,9 +1386,6 @@ struct InliningPhase {
 if (data->info()->is_bailout_on_uninitialized()) {
 flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
 }
-if (data->info()->is_concurrent_inlining()) {
-flags |= JSNativeContextSpecialization::kConcurrentInlining;
-}
 // Passing the OptimizedCompilationInfo's shared zone here as
 // JSNativeContextSpecialization allocates out-of-heap objects
 // that need to live until code generation.
@@ -1405,14 +1396,8 @@ struct InliningPhase {
 temp_zone, data->info(), data->jsgraph(),
 data->broker(), data->source_positions());
-JSIntrinsicLowering::Flags intrinsic_lowering_flags =
-JSIntrinsicLowering::kNoFlags;
-if (data->info()->is_concurrent_inlining()) {
-intrinsic_lowering_flags |= JSIntrinsicLowering::kConcurrentInlining;
-}
 JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
-data->broker(),
-intrinsic_lowering_flags);
+data->broker());
 AddReducer(data, &graph_reducer, &dead_code_elimination);
 AddReducer(data, &graph_reducer, &checkpoint_elimination);
 AddReducer(data, &graph_reducer, &common_reducer);
@@ -2388,7 +2373,7 @@ void PipelineImpl::Serialize() {
 }
 data->broker()->SetTargetNativeContextRef(data->native_context());
-if (data->info()->is_concurrent_inlining()) {
+if (data->broker()->is_concurrent_inlining()) {
 Run<HeapBrokerInitializationPhase>();
 Run<SerializationPhase>();
 data->broker()->StopSerializing();
@@ -2428,7 +2413,7 @@ bool PipelineImpl::CreateGraph() {
 // Run the type-sensitive lowerings and optimizations on the graph.
 {
-if (!data->info()->is_concurrent_inlining()) {
+if (!data->broker()->is_concurrent_inlining()) {
 Run<HeapBrokerInitializationPhase>();
 Run<CopyMetadataForConcurrentCompilePhase>();
 data->broker()->StopSerializing();
@@ -2825,12 +2810,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
 std::unique_ptr<PipelineStatistics> pipeline_statistics(
 CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
 &zone_stats));
-if (i::FLAG_concurrent_inlining) {
-info->MarkAsConcurrentInlining();
-}
 PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(),
-info->is_concurrent_inlining());
+i::FLAG_concurrent_inlining);
 PipelineImpl pipeline(&data);
 Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -2893,11 +2875,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
 // static
 std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
 Isolate* isolate, Handle<JSFunction> function, bool has_script,
-bool is_concurrent_inlining) {
+BailoutId osr_offset, JavaScriptFrame* osr_frame) {
 Handle<SharedFunctionInfo> shared =
 handle(function->shared(), function->GetIsolate());
 return std::make_unique<PipelineCompilationJob>(isolate, shared, function,
-is_concurrent_inlining);
+osr_offset, osr_frame);
 }
 // static
...
@@ -46,7 +46,8 @@ class Pipeline : public AllStatic {
 // Returns a new compilation job for the given JavaScript function.
 static std::unique_ptr<OptimizedCompilationJob> NewCompilationJob(
 Isolate* isolate, Handle<JSFunction> function, bool has_script,
-bool is_concurrent_inlining);
+BailoutId osr_offset = BailoutId::None(),
+JavaScriptFrame* osr_frame = nullptr);
 // Run the pipeline for the WebAssembly compilation info.
 static void GenerateCodeForWasmFunction(
...
@@ -37,8 +37,7 @@ class JSIntrinsicLoweringTest : public GraphTest {
 &machine);
 // TODO(titzer): mock the GraphReducer here for better unit testing.
 GraphReducer graph_reducer(zone(), graph(), tick_counter());
-JSIntrinsicLowering reducer(&graph_reducer, &jsgraph, broker(),
-JSIntrinsicLowering::kNoFlags);
+JSIntrinsicLowering reducer(&graph_reducer, &jsgraph, broker());
 return reducer.Reduce(node);
 }
...
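For callers, the visible API change is that Pipeline::NewCompilationJob now takes the OSR parameters directly (with defaults) instead of a precomputed is_concurrent_inlining boolean; the job computes the broker's flag itself from FLAG_concurrent_inlining and the OSR offset. A hypothetical non-OSR call site (illustrative only; the diff above shows the real one in compiler.cc) could rely on the defaults:

// Hypothetical call site: osr_offset and osr_frame default to
// BailoutId::None() and nullptr for non-OSR compilations.
std::unique_ptr<OptimizedCompilationJob> job(
    compiler::Pipeline::NewCompilationJob(isolate, function, has_script));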