Commit 86ce461e authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Towards concurrent compilation

Add LocalIsolate use, broker()->GetFeedbackForPropertyAccess, and
generating persistent/canonical handles to fix maglev concurrent
compilation.

Bug: v8:7700
Change-Id: Ifd1156c72710047b5f2930837a04709419b23bc3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3578546
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79918}
parent a3aad69c
...@@ -69,11 +69,17 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate, ...@@ -69,11 +69,17 @@ MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
zone()->New<compiler::CompilationDependencies>(broker(), zone()); zone()->New<compiler::CompilationDependencies>(broker(), zone());
USE(deps); // The deps register themselves in the heap broker. USE(deps); // The deps register themselves in the heap broker.
// Heap broker initialization may already use IsPendingAllocation.
isolate->heap()->PublishPendingAllocations();
broker()->SetTargetNativeContextRef( broker()->SetTargetNativeContextRef(
handle(function->native_context(), isolate)); handle(function->native_context(), isolate));
broker()->InitializeAndStartSerializing(); broker()->InitializeAndStartSerializing();
broker()->StopSerializing(); broker()->StopSerializing();
// Serialization may have allocated.
isolate->heap()->PublishPendingAllocations();
toplevel_compilation_unit_ = toplevel_compilation_unit_ =
MaglevCompilationUnit::New(zone(), this, function); MaglevCompilationUnit::New(zone(), this, function);
} }
......
...@@ -30,6 +30,7 @@ class MaglevCompilationUnit : public ZoneObject { ...@@ -30,6 +30,7 @@ class MaglevCompilationUnit : public ZoneObject {
MaglevCompilationInfo* info() const { return info_; } MaglevCompilationInfo* info() const { return info_; }
compiler::JSHeapBroker* broker() const; compiler::JSHeapBroker* broker() const;
Isolate* isolate() const; Isolate* isolate() const;
LocalIsolate* local_isolate() const;
Zone* zone() const; Zone* zone() const;
int register_count() const { return register_count_; } int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; } int parameter_count() const { return parameter_count_; }
......
...@@ -138,8 +138,9 @@ class UseMarkingProcessor { ...@@ -138,8 +138,9 @@ class UseMarkingProcessor {
}; };
// static // static
void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) { void MaglevCompiler::Compile(LocalIsolate* local_isolate,
MaglevCompiler compiler(toplevel_compilation_unit); MaglevCompilationUnit* toplevel_compilation_unit) {
MaglevCompiler compiler(local_isolate, toplevel_compilation_unit);
compiler.Compile(); compiler.Compile();
} }
...@@ -153,7 +154,7 @@ void MaglevCompiler::Compile() { ...@@ -153,7 +154,7 @@ void MaglevCompiler::Compile() {
new MaglevGraphLabeller()); new MaglevGraphLabeller());
} }
MaglevGraphBuilder graph_builder(toplevel_compilation_unit_); MaglevGraphBuilder graph_builder(local_isolate(), toplevel_compilation_unit_);
graph_builder.Build(); graph_builder.Build();
......
...@@ -24,7 +24,8 @@ class Graph; ...@@ -24,7 +24,8 @@ class Graph;
class MaglevCompiler { class MaglevCompiler {
public: public:
// May be called from any thread. // May be called from any thread.
static void Compile(MaglevCompilationUnit* toplevel_compilation_unit); static void Compile(LocalIsolate* local_isolate,
MaglevCompilationUnit* toplevel_compilation_unit);
// Called on the main thread after Compile has completed. // Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class? // TODO(v8:7700): Move this to a different class?
...@@ -32,8 +33,10 @@ class MaglevCompiler { ...@@ -32,8 +33,10 @@ class MaglevCompiler {
MaglevCompilationUnit* toplevel_compilation_unit); MaglevCompilationUnit* toplevel_compilation_unit);
private: private:
explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit) explicit MaglevCompiler(LocalIsolate* local_isolate,
: toplevel_compilation_unit_(toplevel_compilation_unit) {} MaglevCompilationUnit* toplevel_compilation_unit)
: local_isolate_(local_isolate),
toplevel_compilation_unit_(toplevel_compilation_unit) {}
void Compile(); void Compile();
...@@ -41,8 +44,9 @@ class MaglevCompiler { ...@@ -41,8 +44,9 @@ class MaglevCompiler {
return toplevel_compilation_unit_->broker(); return toplevel_compilation_unit_->broker();
} }
Zone* zone() { return toplevel_compilation_unit_->zone(); } Zone* zone() { return toplevel_compilation_unit_->zone(); }
Isolate* isolate() { return toplevel_compilation_unit_->isolate(); } LocalIsolate* local_isolate() { return local_isolate_; }
LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const toplevel_compilation_unit_; MaglevCompilationUnit* const toplevel_compilation_unit_;
}; };
......
...@@ -101,7 +101,8 @@ CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) { ...@@ -101,7 +101,8 @@ CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl( CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
RuntimeCallStats* stats, LocalIsolate* local_isolate) { RuntimeCallStats* stats, LocalIsolate* local_isolate) {
LocalIsolateScope scope{info(), local_isolate}; LocalIsolateScope scope{info(), local_isolate};
maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit()); maglev::MaglevCompiler::Compile(local_isolate,
info()->toplevel_compilation_unit());
// TODO(v8:7700): Actual return codes. // TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED; return CompilationJob::SUCCEEDED;
} }
......
...@@ -21,8 +21,22 @@ namespace internal { ...@@ -21,8 +21,22 @@ namespace internal {
namespace maglev { namespace maglev {
MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit) namespace {
: compilation_unit_(compilation_unit),
int LoadSimpleFieldHandler(FieldIndex field_index) {
int config = LoadHandler::KindBits::encode(LoadHandler::Kind::kField) |
LoadHandler::IsInobjectBits::encode(field_index.is_inobject()) |
LoadHandler::IsDoubleBits::encode(field_index.is_double()) |
LoadHandler::FieldIndexBits::encode(field_index.index());
return config;
}
} // namespace
MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
MaglevCompilationUnit* compilation_unit)
: local_isolate_(local_isolate),
compilation_unit_(compilation_unit),
iterator_(bytecode().object()), iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())), jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the // Overallocate merge_states_ by one to allow always looking up the
...@@ -160,7 +174,7 @@ void MaglevGraphBuilder::VisitUnaryOperation() { ...@@ -160,7 +174,7 @@ void MaglevGraphBuilder::VisitUnaryOperation() {
template <Operation kOperation> template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() { void MaglevGraphBuilder::VisitBinaryOperation() {
FeedbackNexus nexus = feedback_nexus(1); FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) { if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
if (nexus.kind() == FeedbackSlotKind::kBinaryOp) { if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
...@@ -185,7 +199,7 @@ void MaglevGraphBuilder::VisitBinaryOperation() { ...@@ -185,7 +199,7 @@ void MaglevGraphBuilder::VisitBinaryOperation() {
template <Operation kOperation> template <Operation kOperation>
void MaglevGraphBuilder::VisitBinarySmiOperation() { void MaglevGraphBuilder::VisitBinarySmiOperation() {
FeedbackNexus nexus = feedback_nexus(1); FeedbackNexus nexus = FeedbackNexusForOperand(1);
if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) { if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) {
if (nexus.kind() == FeedbackSlotKind::kBinaryOp) { if (nexus.kind() == FeedbackSlotKind::kBinaryOp) {
...@@ -253,11 +267,9 @@ void MaglevGraphBuilder::VisitLdaCurrentContextSlot() { ...@@ -253,11 +267,9 @@ void MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
// TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of // TODO(leszeks): Passing a LoadHandler to LoadField here is a bit of
// a hack, maybe we should have a LoadRawOffset or similar. // a hack, maybe we should have a LoadRawOffset or similar.
SetAccumulator(AddNewNode<LoadField>( SetAccumulator(AddNewNode<LoadField>(
{context}, LoadHandler::LoadField( {context},
isolate(), FieldIndex::ForInObjectOffset( LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
Context::OffsetOfElementAt(slot_index), Context::OffsetOfElementAt(slot_index), FieldIndex::kTagged))));
FieldIndex::kTagged))
->value()));
} }
void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() { void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
// TODO(leszeks): Consider context specialising. // TODO(leszeks): Consider context specialising.
...@@ -333,10 +345,8 @@ void MaglevGraphBuilder::BuildPropertyCellAccess( ...@@ -333,10 +345,8 @@ void MaglevGraphBuilder::BuildPropertyCellAccess(
// a hack, maybe we should have a LoadRawOffset or similar. // a hack, maybe we should have a LoadRawOffset or similar.
SetAccumulator(AddNewNode<LoadField>( SetAccumulator(AddNewNode<LoadField>(
{property_cell_node}, {property_cell_node},
LoadHandler::LoadField( LoadSimpleFieldHandler(FieldIndex::ForInObjectOffset(
isolate(), FieldIndex::ForInObjectOffset(PropertyCell::kValueOffset, PropertyCell::kValueOffset, FieldIndex::kTagged))));
FieldIndex::kTagged))
->value()));
} }
void MaglevGraphBuilder::VisitLdaGlobal() { void MaglevGraphBuilder::VisitLdaGlobal() {
...@@ -381,35 +391,50 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot) ...@@ -381,35 +391,50 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaLookupSlot)
void MaglevGraphBuilder::VisitGetNamedProperty() { void MaglevGraphBuilder::VisitGetNamedProperty() {
// GetNamedProperty <object> <name_index> <slot> // GetNamedProperty <object> <name_index> <slot>
ValueNode* object = LoadRegister(0); ValueNode* object = LoadRegister(0);
FeedbackSlot slot_index = GetSlotOperand(2); compiler::NameRef name = GetRefOperand<Name>(1);
FeedbackNexus nexus(feedback().object(), slot_index); FeedbackSlot slot = GetSlotOperand(2);
compiler::FeedbackSource feedback_source{feedback(), slot};
if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) {
AddNewNode<EagerDeopt>({}); const compiler::ProcessedFeedback& processed_feedback =
return; broker()->GetFeedbackForPropertyAccess(feedback_source,
} else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) { compiler::AccessMode::kLoad, name);
std::vector<MapAndHandler> maps_and_handlers;
nexus.ExtractMapsAndHandlers(&maps_and_handlers); switch (processed_feedback.kind()) {
DCHECK_EQ(maps_and_handlers.size(), 1); case compiler::ProcessedFeedback::kInsufficient:
MapAndHandler& map_and_handler = maps_and_handlers[0]; AddNewNode<EagerDeopt>({});
if (map_and_handler.second->IsSmi()) { return;
int handler = map_and_handler.second->ToSmi().value();
LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler); case compiler::ProcessedFeedback::kNamedAccess: {
if (kind == LoadHandler::Kind::kField && const compiler::NamedAccessFeedback& named_feedback =
!LoadHandler::IsWasmStructBits::decode(handler)) { processed_feedback.AsNamedAccess();
AddNewNode<CheckMaps>({object}, if (named_feedback.maps().size() == 1) {
MakeRef(broker(), map_and_handler.first)); // Monomorphic load, check the handler.
SetAccumulator(AddNewNode<LoadField>({object}, handler)); // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
return; MaybeObjectHandle handler =
FeedbackNexusForSlot(slot).FindHandlerForMap(
named_feedback.maps()[0].object());
if (!handler.is_null() && handler->IsSmi()) {
// Smi handler, emit a map check and LoadField.
int smi_handler = handler->ToSmi().value();
LoadHandler::Kind kind = LoadHandler::KindBits::decode(smi_handler);
if (kind == LoadHandler::Kind::kField &&
!LoadHandler::IsWasmStructBits::decode(smi_handler)) {
AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
SetAccumulator(AddNewNode<LoadField>({object}, smi_handler));
return;
}
}
} }
} } break;
default:
break;
} }
// Create a generic load in the fallthrough.
ValueNode* context = GetContext(); ValueNode* context = GetContext();
compiler::NameRef name = GetRefOperand<Name>(1); SetAccumulator(
SetAccumulator(AddNewNode<LoadNamedGeneric>( AddNewNode<LoadNamedGeneric>({context, object}, name, feedback_source));
{context, object}, name,
compiler::FeedbackSource{feedback(), slot_index}));
} }
MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper) MAGLEV_UNIMPLEMENTED_BYTECODE(GetNamedPropertyFromSuper)
...@@ -420,27 +445,43 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable) ...@@ -420,27 +445,43 @@ MAGLEV_UNIMPLEMENTED_BYTECODE(StaModuleVariable)
void MaglevGraphBuilder::VisitSetNamedProperty() { void MaglevGraphBuilder::VisitSetNamedProperty() {
// SetNamedProperty <object> <name_index> <slot> // SetNamedProperty <object> <name_index> <slot>
ValueNode* object = LoadRegister(0); ValueNode* object = LoadRegister(0);
FeedbackNexus nexus = feedback_nexus(2); compiler::NameRef name = GetRefOperand<Name>(1);
FeedbackSlot slot = GetSlotOperand(2);
if (nexus.ic_state() == InlineCacheState::UNINITIALIZED) { compiler::FeedbackSource feedback_source{feedback(), slot};
AddNewNode<EagerDeopt>({});
return; const compiler::ProcessedFeedback& processed_feedback =
} else if (nexus.ic_state() == InlineCacheState::MONOMORPHIC) { broker()->GetFeedbackForPropertyAccess(
std::vector<MapAndHandler> maps_and_handlers; feedback_source, compiler::AccessMode::kStore, name);
nexus.ExtractMapsAndHandlers(&maps_and_handlers);
DCHECK_EQ(maps_and_handlers.size(), 1); switch (processed_feedback.kind()) {
MapAndHandler& map_and_handler = maps_and_handlers[0]; case compiler::ProcessedFeedback::kInsufficient:
if (map_and_handler.second->IsSmi()) { AddNewNode<EagerDeopt>({});
int handler = map_and_handler.second->ToSmi().value(); return;
StoreHandler::Kind kind = StoreHandler::KindBits::decode(handler);
if (kind == StoreHandler::Kind::kField) { case compiler::ProcessedFeedback::kNamedAccess: {
AddNewNode<CheckMaps>({object}, const compiler::NamedAccessFeedback& named_feedback =
MakeRef(broker(), map_and_handler.first)); processed_feedback.AsNamedAccess();
ValueNode* value = GetAccumulator(); if (named_feedback.maps().size() == 1) {
AddNewNode<StoreField>({object, value}, handler); // Monomorphic store, check the handler.
return; // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
MaybeObjectHandle handler =
FeedbackNexusForSlot(slot).FindHandlerForMap(
named_feedback.maps()[0].object());
if (!handler.is_null() && handler->IsSmi()) {
int smi_handler = handler->ToSmi().value();
StoreHandler::Kind kind = StoreHandler::KindBits::decode(smi_handler);
if (kind == StoreHandler::Kind::kField) {
AddNewNode<CheckMaps>({object}, named_feedback.maps()[0]);
ValueNode* value = GetAccumulator();
AddNewNode<StoreField>({object, value}, smi_handler);
return;
}
}
} }
} } break;
default:
break;
} }
// TODO(victorgomes): Generic store. // TODO(victorgomes): Generic store.
......
...@@ -25,7 +25,8 @@ namespace maglev { ...@@ -25,7 +25,8 @@ namespace maglev {
class MaglevGraphBuilder { class MaglevGraphBuilder {
public: public:
explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit); explicit MaglevGraphBuilder(LocalIsolate* local_isolate,
MaglevCompilationUnit* compilation_unit);
void Build() { void Build() {
for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) { for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
...@@ -182,9 +183,12 @@ class MaglevGraphBuilder { ...@@ -182,9 +183,12 @@ class MaglevGraphBuilder {
template <class T, typename = std::enable_if_t< template <class T, typename = std::enable_if_t<
std::is_convertible<T*, Object*>::value>> std::is_convertible<T*, Object*>::value>>
typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) { typename compiler::ref_traits<T>::ref_type GetRefOperand(int operand_index) {
return MakeRef(broker(), // The BytecodeArray itself was fetched by using a barrier so all reads
Handle<T>::cast(iterator_.GetConstantForIndexOperand( // from the constant pool are safe.
operand_index, isolate()))); return MakeRefAssumeMemoryFence(
broker(), broker()->CanonicalPersistentHandle(
Handle<T>::cast(iterator_.GetConstantForIndexOperand(
operand_index, local_isolate()))));
} }
ValueNode* GetConstant(const compiler::ObjectRef& ref) { ValueNode* GetConstant(const compiler::ObjectRef& ref) {
...@@ -394,10 +398,14 @@ class MaglevGraphBuilder { ...@@ -394,10 +398,14 @@ class MaglevGraphBuilder {
const compiler::FeedbackVectorRef& feedback() const { const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback(); return compilation_unit_->feedback();
} }
const FeedbackNexus feedback_nexus(int slot_operand_index) const { const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const {
// TODO(leszeks): Use JSHeapBroker here.
return FeedbackNexus(feedback().object(), return FeedbackNexus(feedback().object(),
GetSlotOperand(slot_operand_index)); GetSlotOperand(slot_operand_index),
broker()->feedback_nexus_config());
}
const FeedbackNexus FeedbackNexusForSlot(FeedbackSlot slot) const {
return FeedbackNexus(feedback().object(), slot,
broker()->feedback_nexus_config());
} }
const compiler::BytecodeArrayRef& bytecode() const { const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode(); return compilation_unit_->bytecode();
...@@ -405,7 +413,7 @@ class MaglevGraphBuilder { ...@@ -405,7 +413,7 @@ class MaglevGraphBuilder {
const compiler::BytecodeAnalysis& bytecode_analysis() const { const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis(); return compilation_unit_->bytecode_analysis();
} }
Isolate* isolate() const { return compilation_unit_->isolate(); } LocalIsolate* local_isolate() const { return local_isolate_; }
Zone* zone() const { return compilation_unit_->zone(); } Zone* zone() const { return compilation_unit_->zone(); }
int parameter_count() const { return compilation_unit_->parameter_count(); } int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); } int register_count() const { return compilation_unit_->register_count(); }
...@@ -416,6 +424,7 @@ class MaglevGraphBuilder { ...@@ -416,6 +424,7 @@ class MaglevGraphBuilder {
return compilation_unit_->graph_labeller(); return compilation_unit_->graph_labeller();
} }
LocalIsolate* const local_isolate_;
MaglevCompilationUnit* const compilation_unit_; MaglevCompilationUnit* const compilation_unit_;
interpreter::BytecodeArrayIterator iterator_; interpreter::BytecodeArrayIterator iterator_;
uint32_t* predecessors_; uint32_t* predecessors_;
......
...@@ -482,7 +482,7 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state, ...@@ -482,7 +482,7 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
__ Cmp(map_tmp, map().object()); __ Cmp(map_tmp, map().object());
// TODO(leszeks): Encode as a bit on CheckMaps. // TODO(leszeks): Encode as a bit on CheckMaps.
if (map().object()->is_migration_target()) { if (map().is_migration_target()) {
JumpToDeferredIf( JumpToDeferredIf(
not_equal, code_gen_state, not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label, [](MaglevCodeGenState* code_gen_state, Label* return_label,
......
...@@ -19,7 +19,7 @@ MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate, ...@@ -19,7 +19,7 @@ MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
// TODO(v8:7700): Support exceptions in maglev. We currently bail if exception // TODO(v8:7700): Support exceptions in maglev. We currently bail if exception
// handler table is non-empty. // handler table is non-empty.
if (unit->bytecode().handler_table_size() > 0) return {}; if (unit->bytecode().handler_table_size() > 0) return {};
maglev::MaglevCompiler::Compile(unit); maglev::MaglevCompiler::Compile(isolate->main_thread_local_isolate(), unit);
return maglev::MaglevCompiler::GenerateCode(unit); return maglev::MaglevCompiler::GenerateCode(unit);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment