Commit 697a1aa3 authored by Jakob Gruber, committed by V8 LUCI CQ

[maglev] Towards concurrent Maglev compilation

This CL implements most parts of the concurrent maglev pipeline.

- MaglevConcurrentDispatcher: controls concurrent jobs.
- MaglevCompilationInfo: holds job-global data, controls handle
  fiddling between the main isolate and local isolates, owns
  job-global state like the Zone.
- MaglevCompilationUnit: same as before, holds per-unit data.

Still missing: job finalization.

Bug: v8:7700
Change-Id: I281178d945e79a0ba97fa2ac7023285d84a16641
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3516036
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79489}
parent 14e05cbc
......@@ -3472,7 +3472,8 @@ v8_header_set("v8_internal_headers") {
"src/maglev/maglev-basic-block.h",
"src/maglev/maglev-code-gen-state.h",
"src/maglev/maglev-code-generator.h",
"src/maglev/maglev-compilation-data.h",
"src/maglev/maglev-compilation-info.h",
"src/maglev/maglev-compilation-unit.h",
"src/maglev/maglev-compiler.h",
"src/maglev/maglev-concurrent-dispatcher.h",
"src/maglev/maglev-graph-builder.h",
......@@ -4509,7 +4510,8 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-code-generator.cc",
"src/maglev/maglev-compilation-data.cc",
"src/maglev/maglev-compilation-info.cc",
"src/maglev/maglev-compilation-unit.cc",
"src/maglev/maglev-compiler.cc",
"src/maglev/maglev-concurrent-dispatcher.cc",
"src/maglev/maglev-graph-builder.cc",
......
......@@ -48,6 +48,7 @@
#include "src/logging/counters-scopes.h"
#include "src/logging/log-inl.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/map.h"
......@@ -910,7 +911,7 @@ void InsertCodeIntoOptimizedCodeCache(
bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
CompilationHandleScope compilation(isolate, compilation_info);
CanonicalHandleScope canonical(isolate, compilation_info);
CanonicalHandleScopeForTurbofan canonical(isolate, compilation_info);
compilation_info->ReopenHandlesInNewHandleScope(isolate);
return job->PrepareJob(isolate) == CompilationJob::SUCCEEDED;
}
......@@ -1085,8 +1086,9 @@ MaybeHandle<CodeT> CompileMaglev(
Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
BytecodeOffset osr_offset, JavaScriptFrame* osr_frame,
GetOptimizedCodeResultHandling result_handling) {
#ifdef V8_ENABLE_MAGLEV
DCHECK(FLAG_maglev);
// TODO(v8:7700): Add missing support.
CHECK(mode == ConcurrencyMode::kNotConcurrent);
CHECK(osr_offset.IsNone());
CHECK(osr_frame == nullptr);
CHECK(result_handling == GetOptimizedCodeResultHandling::kDefault);
......@@ -1096,11 +1098,35 @@ MaybeHandle<CodeT> CompileMaglev(
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
#ifdef V8_ENABLE_MAGLEV
return Maglev::Compile(isolate, function);
#else
return {};
#endif
if (mode == ConcurrencyMode::kNotConcurrent) {
function->ClearOptimizationMarker();
return Maglev::Compile(isolate, function);
}
DCHECK_EQ(mode, ConcurrencyMode::kConcurrent);
// TODO(v8:7700): See everything in GetOptimizedCodeLater.
// - Tracing,
// - timers,
// - aborts on memory pressure,
// ...
// Prepare the job.
auto job = maglev::MaglevCompilationJob::New(isolate, function);
CompilationJob::Status status = job->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED); // TODO(v8:7700): Use status.
// Enqueue it.
isolate->maglev_concurrent_dispatcher()->EnqueueJob(std::move(job));
// Remember that the function is currently being processed.
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
// The code that triggered optimization continues execution here.
return ContinuationForConcurrentOptimization(isolate, function);
#else // V8_ENABLE_MAGLEV
UNREACHABLE();
#endif // V8_ENABLE_MAGLEV
}
MaybeHandle<CodeT> GetOptimizedCode(
......
......@@ -32,6 +32,11 @@
namespace v8 {
namespace internal {
namespace maglev {
class MaglevCompilationInfo;
}
namespace compiler {
class ObjectRef;
......@@ -150,6 +155,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// them.
void DetachLocalIsolate(OptimizedCompilationInfo* info);
// TODO(v8:7700): Refactor this once the broker is no longer
// Turbofan-specific.
void AttachLocalIsolateForMaglev(maglev::MaglevCompilationInfo* info,
LocalIsolate* local_isolate);
void DetachLocalIsolateForMaglev(maglev::MaglevCompilationInfo* info);
bool StackHasOverflowed() const;
#ifdef DEBUG
......
......@@ -3244,7 +3244,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
{
CompilationHandleScope compilation_scope(isolate, info);
CanonicalHandleScope canonical(isolate, info);
CanonicalHandleScopeForTurbofan canonical(isolate, info);
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.InitializeHeapBroker();
}
......
......@@ -10,6 +10,7 @@
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
......@@ -149,11 +150,9 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate,
OptimizedCompilationInfo* info)
: isolate_(isolate),
info_(info),
zone_(info ? info->zone() : new Zone(isolate->allocator(), ZONE_NAME)) {
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate, Zone* zone)
: zone_(zone == nullptr ? new Zone(isolate->allocator(), ZONE_NAME) : zone),
isolate_(isolate) {
HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
......@@ -165,18 +164,12 @@ CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate,
CanonicalHandleScope::~CanonicalHandleScope() {
delete root_index_map_;
if (info_) {
// If we passed a compilation info as parameter, we created the identity map
// on its zone(). Then, we pass it to the compilation info which is
// responsible for the disposal.
info_->set_canonical_handles(DetachCanonicalHandles());
} else {
// If we don't have a compilation info, we created the zone manually. To
// properly dispose of said zone, we need to first free the identity_map_.
// Then we do so manually even though identity_map_ is a unique_ptr.
identity_map_.reset();
delete zone_;
}
// Note: both the identity_map_ (zone-allocated) and the zone_ itself may
// have custom ownership semantics, controlled by subclasses. For example, in
// case of external ownership, the subclass destructor may 'steal' both by
// resetting the identity map pointer and nulling the zone.
identity_map_.reset();
delete zone_;
isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}
......@@ -206,5 +199,26 @@ CanonicalHandleScope::DetachCanonicalHandles() {
return std::move(identity_map_);
}
// Constructs a canonical handle scope whose CanonicalHandlesMap lives on the
// compilation info's zone() rather than on a scope-owned zone.
template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<CompilationInfoT>::
    CanonicalHandleScopeForOptimization(Isolate* isolate,
                                        CompilationInfoT* info)
    : CanonicalHandleScope(isolate, info->zone()), info_(info) {}

template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<
    CompilationInfoT>::~CanonicalHandleScopeForOptimization() {
  // We created the identity map on the compilation info's zone(). Pass
  // ownership to the compilation info which is responsible for the disposal.
  info_->set_canonical_handles(DetachCanonicalHandles());
  zone_ = nullptr;  // We don't own the zone, null it so the base dtor
                    // doesn't delete it.
}
template class CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
#ifdef V8_ENABLE_MAGLEV
template class CanonicalHandleScopeForOptimization<
maglev::ExportedMaglevCompilationInfo>;
#endif // V8_ENABLE_MAGLEV
} // namespace internal
} // namespace v8
......@@ -278,6 +278,10 @@ class IdentityMap;
class RootIndexMap;
class OptimizedCompilationInfo;
namespace maglev {
class ExportedMaglevCompilationInfo;
} // namespace maglev
using CanonicalHandlesMap = IdentityMap<Address*, ZoneAllocationPolicy>;
// A CanonicalHandleScope does not open a new HandleScope. It changes the
......@@ -285,27 +289,23 @@ using CanonicalHandlesMap = IdentityMap<Address*, ZoneAllocationPolicy>;
// This does not apply to nested inner HandleScopes unless a nested
// CanonicalHandleScope is introduced. Handles are only canonicalized within
// the same CanonicalHandleScope, but not across nested ones.
class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope final {
class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope {
public:
// If we passed a compilation info as parameter, we created the
// CanonicalHandlesMap on said compilation info's zone(). If so, in the
// CanonicalHandleScope destructor we hand off the canonical handle map to the
// compilation info. The compilation info is responsible for the disposal. If
// we don't have a compilation info, we create a zone in this constructor. To
// properly dispose of said zone, we need to first free the identity_map_
// If no Zone is passed to this constructor, we create (and own) a new zone.
// To properly dispose of said zone, we need to first free the identity_map_
// which is done manually even though identity_map_ is a unique_ptr.
explicit CanonicalHandleScope(Isolate* isolate,
OptimizedCompilationInfo* info = nullptr);
explicit CanonicalHandleScope(Isolate* isolate, Zone* zone = nullptr);
~CanonicalHandleScope();
protected:
std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
Zone* zone_; // *Not* const, may be mutated by subclasses.
private:
Address* Lookup(Address object);
std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();
Isolate* isolate_;
OptimizedCompilationInfo* info_;
Zone* zone_;
Isolate* const isolate_;
RootIndexMap* root_index_map_;
std::unique_ptr<CanonicalHandlesMap> identity_map_;
// Ordinary nested handle scopes within the current one are not canonical.
......@@ -316,6 +316,27 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScope final {
friend class HandleScope;
};
// A CanonicalHandleScope for use during optimizing compilation (Turbofan or
// Maglev). The CanonicalHandlesMap is created on the compilation info's
// zone(). In the destructor we hand the canonical handle map off to the
// compilation info, which is then responsible for its disposal.
template <class CompilationInfoT>
class V8_EXPORT_PRIVATE V8_NODISCARD CanonicalHandleScopeForOptimization final
    : public CanonicalHandleScope {
 public:
  explicit CanonicalHandleScopeForOptimization(Isolate* isolate,
                                               CompilationInfoT* info);
  ~CanonicalHandleScopeForOptimization();

 private:
  // Receives ownership of the canonical handles map on destruction.
  CompilationInfoT* const info_;
};
using CanonicalHandleScopeForTurbofan =
CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
using CanonicalHandleScopeForMaglev =
CanonicalHandleScopeForOptimization<maglev::ExportedMaglevCompilationInfo>;
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
class V8_NODISCARD SealHandleScope final {
......
......@@ -12,7 +12,7 @@
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
......@@ -56,11 +56,11 @@ class MaglevCodeGenState {
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
return compilation_unit_->bytecode_analysis();
}
compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode;
return compilation_unit_->bytecode();
}
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
......
......@@ -8,7 +8,7 @@
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
......
......@@ -12,7 +12,7 @@ namespace internal {
namespace maglev {
class Graph;
struct MaglevCompilationUnit;
class MaglevCompilationUnit;
class MaglevCodeGenerator : public AllStatic {
public:
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compilation-info.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/handles/persistent-handles.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-compiler.h"
#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
#include "src/utils/identity-map.h"
#include "src/utils/locked-queue-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
namespace {
constexpr char kMaglevZoneName[] = "maglev-compilation-job-zone";
// RAII helper covering main-thread initialization of a MaglevCompilationInfo:
// it opens a PersistentHandlesScope and a CanonicalHandleScopeForMaglev for
// the duration of the scope. On destruction, the persistent handles are
// detached and stored on the compilation info so they can later travel to the
// LocalIsolate.
class V8_NODISCARD MaglevCompilationHandleScope final {
 public:
  MaglevCompilationHandleScope(Isolate* isolate,
                               maglev::MaglevCompilationInfo* info)
      : info_(info),
        persistent_(isolate),
        exported_info_(info),
        canonical_(isolate, &exported_info_) {
    // Handles already owned by the info must be re-created inside the newly
    // opened scope.
    info->ReopenHandlesInNewHandleScope(isolate);
  }

  ~MaglevCompilationHandleScope() {
    // Hand the persistent handles over to the compilation info.
    info_->set_persistent_handles(persistent_.Detach());
  }

 private:
  maglev::MaglevCompilationInfo* const info_;
  PersistentHandlesScope persistent_;
  // Note: declaration order matters; exported_info_ must be initialized
  // before canonical_, which takes its address.
  ExportedMaglevCompilationInfo exported_info_;
  CanonicalHandleScopeForMaglev canonical_;
};
} // namespace
// Creates the job-global compilation state: the zone, the heap broker, and
// the toplevel compilation unit. Runs on the main thread. Flag values are
// snapshotted into members for later thread-safe access.
MaglevCompilationInfo::MaglevCompilationInfo(Isolate* isolate,
                                             Handle<JSFunction> function)
    : zone_(isolate->allocator(), kMaglevZoneName),
      isolate_(isolate),
      broker_(new compiler::JSHeapBroker(
          isolate, zone(), FLAG_trace_heap_broker, CodeKind::MAGLEV)),
      shared_(function->shared(), isolate),
      function_(function)
#define V(Name) , Name##_(FLAG_##Name)
          MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
{
  DCHECK(FLAG_maglev);
  // All handles created inside this scope become persistent and canonical.
  MaglevCompilationHandleScope compilation(isolate, this);

  compiler::CompilationDependencies* deps =
      zone()->New<compiler::CompilationDependencies>(broker(), zone());
  USE(deps);  // The deps register themselves in the heap broker.

  broker()->SetTargetNativeContextRef(
      handle(function->native_context(), isolate));
  broker()->InitializeAndStartSerializing();
  broker()->StopSerializing();

  // Created last: the unit's constructor immediately creates broker refs
  // (bytecode, feedback) and thus needs a fully initialized broker.
  toplevel_compilation_unit_ =
      MaglevCompilationUnit::New(zone(), this, function);
}
MaglevCompilationInfo::~MaglevCompilationInfo() = default;

// Takes ownership of the given graph labeller (heap-allocated by the caller).
void MaglevCompilationInfo::set_graph_labeller(
    MaglevGraphLabeller* graph_labeller) {
  graph_labeller_.reset(graph_labeller);
}
// Re-creates the handles owned by this info (shared_, function_) inside the
// currently active handle scope, so they stay valid for its lifetime.
void MaglevCompilationInfo::ReopenHandlesInNewHandleScope(Isolate* isolate) {
  DCHECK(!shared_.is_null());
  shared_ = handle(*shared_, isolate);
  DCHECK(!function_.is_null());
  function_ = handle(*function_, isolate);
}
// Stores the persistent handles container; must currently be unset. The
// container is passed back and forth between this info and the LocalIsolate.
void MaglevCompilationInfo::set_persistent_handles(
    std::unique_ptr<PersistentHandles>&& persistent_handles) {
  DCHECK_NULL(ph_);
  ph_ = std::move(persistent_handles);
  DCHECK_NOT_NULL(ph_);
}

// Releases ownership of the persistent handles container to the caller.
std::unique_ptr<PersistentHandles>
MaglevCompilationInfo::DetachPersistentHandles() {
  DCHECK_NOT_NULL(ph_);
  return std::move(ph_);
}
// Stores the canonical handles map; must currently be unset. Like the
// persistent handles, it travels between this info and the broker.
void MaglevCompilationInfo::set_canonical_handles(
    std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
  DCHECK_NULL(canonical_handles_);
  canonical_handles_ = std::move(canonical_handles);
  DCHECK_NOT_NULL(canonical_handles_);
}

// Releases ownership of the canonical handles map to the caller.
std::unique_ptr<CanonicalHandlesMap>
MaglevCompilationInfo::DetachCanonicalHandles() {
  DCHECK_NOT_NULL(canonical_handles_);
  return std::move(canonical_handles_);
}
} // namespace maglev
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
#define V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
#include <memory>
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
namespace v8 {
namespace internal {
class Isolate;
class PersistentHandles;
class SharedFunctionInfo;
class Zone;
namespace compiler {
class JSHeapBroker;
}
namespace maglev {
class Graph;
class MaglevCompilationUnit;
class MaglevGraphLabeller;
#define MAGLEV_COMPILATION_FLAG_LIST(V) \
V(code_comments) \
V(maglev) \
V(print_maglev_code) \
V(print_maglev_graph) \
V(trace_maglev_regalloc)
// Job-global data for a Maglev compilation job: owns the Zone, the heap
// broker, and the handle containers that are passed between the main isolate
// and the LocalIsolate. One instance exists per compilation job.
class MaglevCompilationInfo final {
 public:
  static std::unique_ptr<MaglevCompilationInfo> New(
      Isolate* isolate, Handle<JSFunction> function) {
    // Doesn't use make_unique due to the private ctor.
    return std::unique_ptr<MaglevCompilationInfo>(
        new MaglevCompilationInfo(isolate, function));
  }
  ~MaglevCompilationInfo();

  Isolate* isolate() const { return isolate_; }
  Zone* zone() { return &zone_; }
  compiler::JSHeapBroker* broker() const { return broker_.get(); }
  MaglevCompilationUnit* toplevel_compilation_unit() const {
    return toplevel_compilation_unit_;
  }
  Handle<JSFunction> function() const { return function_; }

  bool has_graph_labeller() const { return !!graph_labeller_; }
  // Takes ownership of the given labeller.
  void set_graph_labeller(MaglevGraphLabeller* graph_labeller);
  MaglevGraphLabeller* graph_labeller() const {
    DCHECK(has_graph_labeller());
    return graph_labeller_.get();
  }

  void set_graph(Graph* graph) { graph_ = graph; }
  Graph* graph() const { return graph_; }

  void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
  MaybeHandle<CodeT> codet() const { return codet_; }

  // Flag accessors (for thread-safe access to global flags).
  // TODO(v8:7700): Consider caching these.
#define V(Name) \
  bool Name() const { return Name##_; }
  MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V

  // Must be called from within a MaglevCompilationHandleScope. Transfers owned
  // handles (e.g. shared_, function_) to the new scope.
  void ReopenHandlesInNewHandleScope(Isolate* isolate);

  // Persistent and canonical handles are passed back and forth between the
  // Isolate, this info, and the LocalIsolate.
  void set_persistent_handles(
      std::unique_ptr<PersistentHandles>&& persistent_handles);
  std::unique_ptr<PersistentHandles> DetachPersistentHandles();
  void set_canonical_handles(
      std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
  std::unique_ptr<CanonicalHandlesMap> DetachCanonicalHandles();

 private:
  MaglevCompilationInfo(Isolate* isolate, Handle<JSFunction> function);

  Zone zone_;
  Isolate* const isolate_;
  const std::unique_ptr<compiler::JSHeapBroker> broker_;
  // Must be initialized late since it requires an initialized heap broker.
  MaglevCompilationUnit* toplevel_compilation_unit_ = nullptr;
  Handle<SharedFunctionInfo> shared_;
  Handle<JSFunction> function_;

  std::unique_ptr<MaglevGraphLabeller> graph_labeller_;

  // Produced off-thread during ExecuteJobImpl.
  Graph* graph_ = nullptr;

  // Produced during FinalizeJobImpl.
  MaybeHandle<CodeT> codet_;

  // Flag values snapshotted at construction (see the ctor's init list).
#define V(Name) const bool Name##_;
  MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V

  // Lifecycle of the persistent handles container:
  // 1) Created via PersistentHandlesScope inside of CompilationHandleScope.
  // 2) Owned by MaglevCompilationInfo.
  // 3) Owned by the broker's LocalHeap when entering the LocalHeapScope.
  // 4) Back to MaglevCompilationInfo when exiting the LocalHeapScope.
  //
  // TODO(jgruber,v8:7700): Update this comment:
  //
  // In normal execution it gets destroyed when PipelineData gets destroyed.
  // There is a special case in GenerateCodeForTesting where the JSHeapBroker
  // will not be retired in that same method. In this case, we need to
  // re-attach the PersistentHandles container to the JSHeapBroker.
  std::unique_ptr<PersistentHandles> ph_;

  // Canonical handles follow the same path as the persistent handles above,
  // except that step 1 happens in the CanonicalHandleScope instead.
  std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILATION_INFO_H_
......@@ -2,33 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/objects/js-function-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
MaglevCompilationData::MaglevCompilationData(compiler::JSHeapBroker* broker)
: broker(broker),
isolate(broker->isolate()),
zone(broker->isolate()->allocator(), "maglev-zone") {}
MaglevCompilationData::~MaglevCompilationData() = default;
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationData* data,
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
Handle<JSFunction> function)
: compilation_data(data),
bytecode(
: info_(info),
bytecode_(
MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
feedback(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis(bytecode.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode.register_count()),
parameter_count_(bytecode.parameter_count()) {}
feedback_(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode_.register_count()),
parameter_count_(bytecode_.parameter_count()) {}
// The accessors below all delegate to the job-global MaglevCompilationInfo.
compiler::JSHeapBroker* MaglevCompilationUnit::broker() const {
  return info_->broker();
}

Isolate* MaglevCompilationUnit::isolate() const { return info_->isolate(); }

Zone* MaglevCompilationUnit::zone() const { return info_->zone(); }

bool MaglevCompilationUnit::has_graph_labeller() const {
  return info_->has_graph_labeller();
}

// Only valid if a labeller has been set on the info; guarded by a DCHECK.
MaglevGraphLabeller* MaglevCompilationUnit::graph_labeller() const {
  DCHECK(has_graph_labeller());
  return info_->graph_labeller();
}
} // namespace maglev
} // namespace internal
......
......@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#define V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#ifndef V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
#define V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
......@@ -13,41 +13,45 @@ namespace v8 {
namespace internal {
namespace maglev {
class MaglevCompilationInfo;
class MaglevGraphLabeller;
struct MaglevCompilationData {
explicit MaglevCompilationData(compiler::JSHeapBroker* broker);
~MaglevCompilationData();
std::unique_ptr<MaglevGraphLabeller> graph_labeller;
compiler::JSHeapBroker* const broker;
Isolate* const isolate;
Zone zone;
};
struct MaglevCompilationUnit {
MaglevCompilationUnit(MaglevCompilationData* data,
// Per-unit data, i.e. once per top-level function and once per inlined
// function.
class MaglevCompilationUnit : public ZoneObject {
public:
static MaglevCompilationUnit* New(Zone* zone, MaglevCompilationInfo* data,
Handle<JSFunction> function) {
return zone->New<MaglevCompilationUnit>(data, function);
}
MaglevCompilationUnit(MaglevCompilationInfo* data,
Handle<JSFunction> function);
compiler::JSHeapBroker* broker() const { return compilation_data->broker; }
Isolate* isolate() const { return compilation_data->isolate; }
Zone* zone() const { return &compilation_data->zone; }
MaglevCompilationInfo* info() const { return info_; }
compiler::JSHeapBroker* broker() const;
Isolate* isolate() const;
Zone* zone() const;
int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const { return !!compilation_data->graph_labeller; }
MaglevGraphLabeller* graph_labeller() const {
DCHECK(has_graph_labeller());
return compilation_data->graph_labeller.get();
bool has_graph_labeller() const;
MaglevGraphLabeller* graph_labeller() const;
const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return bytecode_analysis_;
}
MaglevCompilationData* const compilation_data;
const compiler::BytecodeArrayRef bytecode;
const compiler::FeedbackVectorRef feedback;
compiler::BytecodeAnalysis const bytecode_analysis;
int register_count_;
int parameter_count_;
private:
MaglevCompilationInfo* const info_;
const compiler::BytecodeArrayRef bytecode_;
const compiler::FeedbackVectorRef feedback_;
const compiler::BytecodeAnalysis bytecode_analysis_;
const int register_count_;
const int parameter_count_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#endif // V8_MAGLEV_MAGLEV_COMPILATION_UNIT_H_
......@@ -26,7 +26,7 @@
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-builder.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
......@@ -126,69 +126,82 @@ class UseMarkingProcessor {
}
};
MaglevCompiler::MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function)
: compilation_data_(broker),
toplevel_compilation_unit_(&compilation_data_, function) {}
// static
void MaglevCompiler::Compile(MaglevCompilationUnit* toplevel_compilation_unit) {
MaglevCompiler compiler(toplevel_compilation_unit);
compiler.Compile();
}
void MaglevCompiler::Compile() {
compiler::UnparkedScopeIfNeeded unparked_scope(broker());
MaybeHandle<Code> MaglevCompiler::Compile() {
// Build graph.
if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
FLAG_trace_maglev_regalloc) {
compilation_data_.graph_labeller.reset(new MaglevGraphLabeller());
toplevel_compilation_unit_->info()->set_graph_labeller(
new MaglevGraphLabeller());
}
MaglevGraphBuilder graph_builder(&toplevel_compilation_unit_);
MaglevGraphBuilder graph_builder(toplevel_compilation_unit_);
graph_builder.Build();
// TODO(v8:7700): Clean up after all bytecodes are supported.
if (graph_builder.found_unsupported_bytecode()) {
return {};
return;
}
if (FLAG_print_maglev_graph) {
std::cout << "After graph buiding" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
}
{
GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
MaglevVregAllocator>
processor(&toplevel_compilation_unit_);
processor(toplevel_compilation_unit_);
processor.ProcessGraph(graph_builder.graph());
}
if (FLAG_print_maglev_graph) {
std::cout << "After node processor" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
}
StraightForwardRegisterAllocator allocator(&toplevel_compilation_unit_,
StraightForwardRegisterAllocator allocator(toplevel_compilation_unit_,
graph_builder.graph());
if (FLAG_print_maglev_graph) {
std::cout << "After register allocation" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
PrintGraph(std::cout, toplevel_compilation_unit_, graph_builder.graph());
}
Handle<Code> code;
// Stash the compiled graph on the compilation info.
toplevel_compilation_unit_->info()->set_graph(graph_builder.graph());
}
// static
MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
MaglevCompilationUnit* toplevel_compilation_unit) {
Graph* const graph = toplevel_compilation_unit->info()->graph();
if (graph == nullptr) return {}; // Compilation failed.
if (!MaglevCodeGenerator::Generate(&toplevel_compilation_unit_,
graph_builder.graph())
Handle<Code> code;
if (!MaglevCodeGenerator::Generate(toplevel_compilation_unit, graph)
.ToHandle(&code)) {
return {};
}
const bool deps_committed_successfully =
broker()->dependencies()->Commit(code);
compiler::JSHeapBroker* const broker = toplevel_compilation_unit->broker();
const bool deps_committed_successfully = broker->dependencies()->Commit(code);
CHECK(deps_committed_successfully);
if (FLAG_print_maglev_code) {
code->Print();
}
return code;
Isolate* const isolate = toplevel_compilation_unit->isolate();
return ToCodeT(code, isolate);
}
} // namespace maglev
......
......@@ -8,7 +8,7 @@
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/heap-refs.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
namespace v8 {
namespace internal {
......@@ -19,20 +19,31 @@ class JSHeapBroker;
namespace maglev {
class Graph;
class MaglevCompiler {
public:
explicit MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function);
MaybeHandle<Code> Compile();
// May be called from any thread.
static void Compile(MaglevCompilationUnit* toplevel_compilation_unit);
compiler::JSHeapBroker* broker() const { return compilation_data_.broker; }
Zone* zone() { return &compilation_data_.zone; }
Isolate* isolate() { return compilation_data_.isolate; }
// Called on the main thread after Compile has completed.
// TODO(v8:7700): Move this to a different class?
static MaybeHandle<CodeT> GenerateCode(
MaglevCompilationUnit* toplevel_compilation_unit);
private:
MaglevCompilationData compilation_data_;
MaglevCompilationUnit toplevel_compilation_unit_;
explicit MaglevCompiler(MaglevCompilationUnit* toplevel_compilation_unit)
: toplevel_compilation_unit_(toplevel_compilation_unit) {}
void Compile();
compiler::JSHeapBroker* broker() const {
return toplevel_compilation_unit_->broker();
}
Zone* zone() { return toplevel_compilation_unit_->zone(); }
Isolate* isolate() { return toplevel_compilation_unit_->isolate(); }
MaglevCompilationUnit* const toplevel_compilation_unit_;
};
} // namespace maglev
......
......@@ -4,36 +4,146 @@
#include "src/maglev/maglev-concurrent-dispatcher.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/objects/js-function.h"
#include "src/handles/persistent-handles.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compiler.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
#include "src/utils/identity-map.h"
#include "src/utils/locked-queue-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
// Moves the canonical handles from the Maglev compilation info to the broker,
// and the persistent handles to the LocalIsolate's heap, for use during
// background execution.
// TODO(v8:7700): Refactor once the broker is no longer Turbofan-specific.
void JSHeapBroker::AttachLocalIsolateForMaglev(
    maglev::MaglevCompilationInfo* info, LocalIsolate* local_isolate) {
  set_canonical_handles(info->DetachCanonicalHandles());
  DCHECK_NULL(local_isolate_);
  local_isolate_ = local_isolate;
  DCHECK_NOT_NULL(local_isolate_);
  local_isolate_->heap()->AttachPersistentHandles(
      info->DetachPersistentHandles());
}

// Inverse of AttachLocalIsolateForMaglev: hands the canonical and persistent
// handles back to the compilation info and disconnects the LocalIsolate.
void JSHeapBroker::DetachLocalIsolateForMaglev(
    maglev::MaglevCompilationInfo* info) {
  DCHECK_NULL(ph_);
  DCHECK_NOT_NULL(local_isolate_);
  std::unique_ptr<PersistentHandles> ph =
      local_isolate_->heap()->DetachPersistentHandles();
  local_isolate_ = nullptr;
  info->set_canonical_handles(DetachCanonicalHandles());
  info->set_persistent_handles(std::move(ph));
}
} // namespace compiler
namespace maglev {
class MaglevConcurrentDispatcher::Job final {
namespace {
constexpr char kMaglevCompilerName[] = "Maglev";
// LocalIsolateScope encapsulates the phase where persistent handles are
// attached to the LocalHeap inside {local_isolate}.
class V8_NODISCARD LocalIsolateScope final {
public:
explicit Job(Handle<JSFunction> function) : function_(function) {}
explicit LocalIsolateScope(MaglevCompilationInfo* info,
LocalIsolate* local_isolate)
: info_(info) {
info_->broker()->AttachLocalIsolateForMaglev(info_, local_isolate);
}
void Install(Isolate* isolate) { UNIMPLEMENTED(); }
~LocalIsolateScope() { info_->broker()->DetachLocalIsolateForMaglev(info_); }
private:
const Handle<JSFunction> function_;
MaglevCompilationInfo* const info_;
};
} // namespace
// Forwards to the wrapped MaglevCompilationInfo's zone; keeps the info type
// itself out of the public header.
Zone* ExportedMaglevCompilationInfo::zone() const { return info_->zone(); }
// Transfers ownership of the canonical-handles map into the wrapped
// MaglevCompilationInfo.
void ExportedMaglevCompilationInfo::set_canonical_handles(
std::unique_ptr<CanonicalHandlesMap>&& canonical_handles) {
info_->set_canonical_handles(std::move(canonical_handles));
}
// static
// Factory: creates the per-job MaglevCompilationInfo for {function} and wraps
// it in a new job.
std::unique_ptr<MaglevCompilationJob> MaglevCompilationJob::New(
Isolate* isolate, Handle<JSFunction> function) {
auto info = maglev::MaglevCompilationInfo::New(isolate, function);
// Raw `new` because the constructor is private; std::make_unique cannot
// access it.
return std::unique_ptr<MaglevCompilationJob>(
new MaglevCompilationJob(std::move(info)));
}
// Takes ownership of {info}. Passes nullptr as the OptimizedCompilationInfo,
// since Maglev carries its state in MaglevCompilationInfo instead.
MaglevCompilationJob::MaglevCompilationJob(
std::unique_ptr<MaglevCompilationInfo>&& info)
: OptimizedCompilationJob(nullptr, kMaglevCompilerName),
info_(std::move(info)) {
// TODO(jgruber, v8:7700): Remove the OptimizedCompilationInfo (which should
// be renamed to TurbofanCompilationInfo) from OptimizedCompilationJob.
DCHECK(FLAG_maglev);
}
MaglevCompilationJob::~MaglevCompilationJob() = default;
// Main-thread preparation phase. Currently a no-op: all setup happens when
// the MaglevCompilationInfo is created.
CompilationJob::Status MaglevCompilationJob::PrepareJobImpl(Isolate* isolate) {
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
// Background-thread execute phase: attaches the job's handles to
// {local_isolate} for the duration of the compile (via LocalIsolateScope),
// then runs the Maglev compiler on the toplevel unit.
CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
RuntimeCallStats* stats, LocalIsolate* local_isolate) {
LocalIsolateScope scope{info(), local_isolate};
maglev::MaglevCompiler::Compile(info()->toplevel_compilation_unit());
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
// Main-thread finalization phase: generates code for the compiled unit and
// stores it on the compilation info.
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
info()->set_codet(maglev::MaglevCompiler::GenerateCode(
info()->toplevel_compilation_unit()));
// TODO(v8:7700): Actual return codes.
return CompilationJob::SUCCEEDED;
}
// The JobTask is posted to V8::GetCurrentPlatform(). It's responsible for
// processing the incoming queue on a worker thread.
class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
public:
explicit JobTask(MaglevConcurrentDispatcher* dispatcher)
: dispatcher_(dispatcher) {}
// Worker-thread entry point: drains the dispatcher's incoming queue,
// executing each job against a background LocalIsolate, and moves finished
// jobs to the outgoing queue for main-thread finalization. Stops early when
// the platform asks us to yield.
// NOTE(review): the stale empty Run() body from the superseded version was
// interleaved above this definition and has been dropped — keeping both
// would declare Run twice.
void Run(JobDelegate* delegate) override {
  LocalIsolate local_isolate(isolate(), ThreadKind::kBackground);
  DCHECK(local_isolate.heap()->IsParked());
  while (!incoming_queue()->IsEmpty() && !delegate->ShouldYield()) {
    std::unique_ptr<MaglevCompilationJob> job;
    // Another worker may have raced us to the job; just move on.
    if (!incoming_queue()->Dequeue(&job)) break;
    DCHECK_NOT_NULL(job);
    RuntimeCallStats* rcs = nullptr;  // TODO(v8:7700): Implement.
    CompilationJob::Status status = job->ExecuteJob(rcs, &local_isolate);
    CHECK_EQ(status, CompilationJob::SUCCEEDED);
    outgoing_queue()->Enqueue(std::move(job));
  }
  // TODO(v8:7700):
  // isolate_->stack_guard()->RequestInstallMaglevCode();
}
// Tells the platform how many workers could usefully run: one per queued job.
size_t GetMaxConcurrency(size_t) const override {
return incoming_queue()->size();
}
private:
// Convenience accessors into the owning dispatcher's state.
Isolate* isolate() const { return dispatcher_->isolate_; }
QueueT* incoming_queue() const { return &dispatcher_->incoming_queue_; }
QueueT* outgoing_queue() const { return &dispatcher_->outgoing_queue_; }
......@@ -60,19 +170,22 @@ MaglevConcurrentDispatcher::~MaglevConcurrentDispatcher() {
}
}
void MaglevConcurrentDispatcher::EnqueueJob(Handle<JSFunction> function) {
void MaglevConcurrentDispatcher::EnqueueJob(
std::unique_ptr<MaglevCompilationJob>&& job) {
DCHECK(is_enabled());
// TODO(v8:7700): RCS.
// RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileMaglev);
incoming_queue_.Enqueue(std::make_unique<Job>(function));
incoming_queue_.Enqueue(std::move(job));
job_handle_->NotifyConcurrencyIncrease();
}
void MaglevConcurrentDispatcher::ProcessFinishedJobs() {
void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<Job> job;
std::unique_ptr<MaglevCompilationJob> job;
outgoing_queue_.Dequeue(&job);
job->Install(isolate_);
CompilationJob::Status status = job->FinalizeJob(isolate_);
// TODO(v8:7700): Use the result.
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
}
......
......@@ -9,6 +9,7 @@
#include <memory>
#include "src/codegen/compiler.h" // For OptimizedCompilationJob.
#include "src/utils/locked-queue.h"
namespace v8 {
......@@ -18,23 +19,60 @@ class Isolate;
namespace maglev {
class MaglevCompilationInfo;
// Exports needed functionality without exposing implementation details.
// Thin non-owning wrapper around MaglevCompilationInfo: exposes only the
// operations external code (e.g. the compiler driver) needs, without pulling
// the full info type into this header.
class ExportedMaglevCompilationInfo final {
public:
explicit ExportedMaglevCompilationInfo(MaglevCompilationInfo* info)
: info_(info) {}
// The job-global zone of the wrapped info.
Zone* zone() const;
// Transfers the canonical-handles map into the wrapped info.
void set_canonical_handles(
std::unique_ptr<CanonicalHandlesMap>&& canonical_handles);
private:
// Not owned; must outlive this wrapper.
MaglevCompilationInfo* const info_;
};
// The job is a single actual compilation task.
// A single Maglev compilation task, driven through the standard
// OptimizedCompilationJob phases: Prepare (main thread), Execute (background
// thread with a LocalIsolate), Finalize (main thread).
class MaglevCompilationJob final : public OptimizedCompilationJob {
public:
// Creates a job (and its MaglevCompilationInfo) for {function}.
static std::unique_ptr<MaglevCompilationJob> New(Isolate* isolate,
Handle<JSFunction> function);
virtual ~MaglevCompilationJob();
Status PrepareJobImpl(Isolate* isolate) override;
Status ExecuteJobImpl(RuntimeCallStats* stats,
LocalIsolate* local_isolate) override;
Status FinalizeJobImpl(Isolate* isolate) override;
private:
// Private: construct via New().
explicit MaglevCompilationJob(std::unique_ptr<MaglevCompilationInfo>&& info);
MaglevCompilationInfo* info() const { return info_.get(); }
const std::unique_ptr<MaglevCompilationInfo> info_;
};
// The public API for Maglev concurrent compilation.
// Keep this as minimal as possible.
class MaglevConcurrentDispatcher final {
class Job;
class JobTask;
// TODO(jgruber): There's no reason to use locking queues here, we only use
// them for simplicity - consider replacing with lock-free data structures.
using QueueT = LockedQueue<std::unique_ptr<Job>>;
using QueueT = LockedQueue<std::unique_ptr<MaglevCompilationJob>>;
public:
explicit MaglevConcurrentDispatcher(Isolate* isolate);
~MaglevConcurrentDispatcher();
// Called from the main thread.
void EnqueueJob(Handle<JSFunction> function);
void EnqueueJob(std::unique_ptr<MaglevCompilationJob>&& job);
// Called from the main thread.
void ProcessFinishedJobs();
void FinalizeFinishedJobs();
bool is_enabled() const { return static_cast<bool>(job_handle_); }
......
......@@ -17,6 +17,86 @@ namespace internal {
namespace maglev {
// Sets up all per-function graph-building state: per-offset jump targets and
// merge states, the (zone-allocated) graph, and the initial basic block whose
// frame state mirrors the interpreter's register file at bytecode offset 0.
// The statement order below is load-bearing: predecessor counts must be
// computed before loop merge states, and the initial block must be populated
// before merging into offset 0.
MaglevGraphBuilder::MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
: compilation_unit_(compilation_unit),
iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the
// next offset.
merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
bytecode().length() + 1)),
graph_(Graph::New(zone())),
current_interpreter_frame_(*compilation_unit_) {
// Null out merge states; only merge points / loop headers get real entries.
memset(merge_states_, 0,
bytecode().length() * sizeof(InterpreterFrameState*));
// Default construct basic block refs.
// TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
for (int i = 0; i < bytecode().length(); ++i) {
new (&jump_targets_[i]) BasicBlockRef();
}
CalculatePredecessorCounts();
// Pre-create a merge state at every loop header, seeded with the loop's
// in-liveness and assignment info from the bytecode analysis.
for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) {
int offset = offset_and_info.first;
const compiler::LoopInfo& loop_info = offset_and_info.second;
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(offset);
merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
*compilation_unit_, offset, NumPredecessors(offset), liveness,
&loop_info);
}
// Synthetic entry block (block_offset_ == -1, i.e. before any bytecode).
current_block_ = zone()->New<BasicBlock>(nullptr);
block_offset_ = -1;
// Materialize every parameter as an InitialValue node.
for (int i = 0; i < parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
}
// TODO(leszeks): Extract out a separate "incoming context/closure" nodes,
// to be able to read in the machine register but also use the frame-spilled
// slot.
interpreter::Register regs[] = {interpreter::Register::current_context(),
interpreter::Register::function_closure()};
for (interpreter::Register& reg : regs) {
current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
}
// Initialize the interpreter register file: undefined everywhere, except
// the new.target/generator register (if valid), which reads the incoming
// machine register.
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(0);
int register_index = 0;
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value =
AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
StoreRegister(
new_target_or_generator_register,
// TODO(leszeks): Expose in Graph.
AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
liveness);
register_index++;
}
for (; register_index < register_count(); register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
// Close the entry block with a jump to offset 0 and merge its frame state
// into the first real block.
BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
MergeIntoFrameState(first_block, 0);
}
// TODO(v8:7700): Clean up after all bytecodes are supported.
#define MAGLEV_UNIMPLEMENTED(BytecodeName) \
do { \
......
......@@ -11,7 +11,7 @@
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
......@@ -23,85 +23,7 @@ namespace maglev {
class MaglevGraphBuilder {
public:
explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit)
: compilation_unit_(compilation_unit),
iterator_(bytecode().object()),
jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length())),
// Overallocate merge_states_ by one to allow always looking up the
// next offset.
merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>(
bytecode().length() + 1)),
graph_(zone()),
current_interpreter_frame_(*compilation_unit_) {
memset(merge_states_, 0,
bytecode().length() * sizeof(InterpreterFrameState*));
// Default construct basic block refs.
// TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
for (int i = 0; i < bytecode().length(); ++i) {
new (&jump_targets_[i]) BasicBlockRef();
}
CalculatePredecessorCounts();
for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) {
int offset = offset_and_info.first;
const compiler::LoopInfo& loop_info = offset_and_info.second;
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(offset);
merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
*compilation_unit_, offset, NumPredecessors(offset), liveness,
&loop_info);
}
current_block_ = zone()->New<BasicBlock>(nullptr);
block_offset_ = -1;
for (int i = 0; i < parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
}
// TODO(leszeks): Extract out a separate "incoming context/closure" nodes,
// to be able to read in the machine register but also use the frame-spilled
// slot.
interpreter::Register regs[] = {interpreter::Register::current_context(),
interpreter::Register::function_closure()};
for (interpreter::Register& reg : regs) {
current_interpreter_frame_.set(reg, AddNewNode<InitialValue>({}, reg));
}
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(0);
int register_index = 0;
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value =
AddNewNode<RootConstant>({}, RootIndex::kUndefinedValue);
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
StoreRegister(
new_target_or_generator_register,
// TODO(leszeks): Expose in Graph.
AddNewNode<RegisterInput>({}, kJavaScriptCallNewTargetRegister),
liveness);
register_index++;
}
for (; register_index < register_count(); register_index++) {
StoreRegister(interpreter::Register(register_index), undefined_value,
liveness);
}
BasicBlock* first_block = CreateBlock<Jump>({}, &jump_targets_[0]);
MergeIntoFrameState(first_block, 0);
}
explicit MaglevGraphBuilder(MaglevCompilationUnit* compilation_unit);
void Build() {
for (iterator_.Reset(); !iterator_.done(); iterator_.Advance()) {
......@@ -111,7 +33,7 @@ class MaglevGraphBuilder {
}
}
Graph* graph() { return &graph_; }
Graph* graph() const { return graph_; }
// TODO(v8:7700): Clean up after all bytecodes are supported.
bool found_unsupported_bytecode() const {
......@@ -181,7 +103,7 @@ class MaglevGraphBuilder {
merge_states_[offset]->Merge(*compilation_unit_,
current_interpreter_frame_,
graph_.last_block(), offset);
graph()->last_block(), offset);
}
ProcessMergePoint(offset);
StartNewBlock(offset);
......@@ -317,7 +239,7 @@ class MaglevGraphBuilder {
BasicBlock* block = current_block_;
current_block_ = nullptr;
graph_.Add(block);
graph()->Add(block);
if (has_graph_labeller()) {
graph_labeller()->RegisterBasicBlock(block);
}
......@@ -408,7 +330,7 @@ class MaglevGraphBuilder {
compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
const compiler::FeedbackVectorRef& feedback() const {
return compilation_unit_->feedback;
return compilation_unit_->feedback();
}
const FeedbackNexus feedback_nexus(int slot_operand_index) const {
// TODO(leszeks): Use JSHeapBroker here.
......@@ -416,10 +338,10 @@ class MaglevGraphBuilder {
GetSlotOperand(slot_operand_index));
}
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode;
return compilation_unit_->bytecode();
}
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
return compilation_unit_->bytecode_analysis();
}
Isolate* isolate() const { return compilation_unit_->isolate(); }
Zone* zone() const { return compilation_unit_->zone(); }
......@@ -444,7 +366,7 @@ class MaglevGraphBuilder {
BasicBlockRef* jump_targets_;
MergePointInterpreterFrameState** merge_states_;
Graph graph_;
Graph* const graph_;
InterpreterFrameState current_interpreter_frame_;
// Allow marking some bytecodes as unsupported during graph building, so that
......
......@@ -17,10 +17,10 @@ namespace maglev {
class BasicBlock;
class ControlNode;
class Graph;
struct MaglevCompilationUnit;
class MaglevCompilationUnit;
class MaglevGraphLabeller;
class NodeBase;
class Node;
class NodeBase;
class Phi;
class ProcessingState;
......
......@@ -342,7 +342,7 @@ class GraphProcessor {
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
return compilation_unit_->bytecode_analysis();
}
MaglevCompilationUnit* const compilation_unit_;
......
......@@ -20,8 +20,11 @@ using BlockConstReverseIterator =
std::vector<BasicBlock*,
ZoneAllocator<BasicBlock*>>::const_reverse_iterator;
class Graph {
class Graph final : public ZoneObject {
public:
static Graph* New(Zone* zone) { return zone->New<Graph>(zone); }
// Shouldn't be used directly; public so that Zone::New can access it.
explicit Graph(Zone* zone) : blocks_(zone) {}
BasicBlock* operator[](int i) { return blocks_[i]; }
......
......@@ -112,15 +112,14 @@ class MergePointInterpreterFrameState {
int merge_offset, interpreter::Register reg,
ValueNode* value) {
#ifdef DEBUG
if (!compilation_unit.bytecode_analysis.IsLoopHeader(merge_offset)) return;
auto& assignments =
compilation_unit.bytecode_analysis.GetLoopInfoFor(merge_offset)
.assignments();
const auto& analysis = compilation_unit.bytecode_analysis();
if (!analysis.IsLoopHeader(merge_offset)) return;
auto& assignments = analysis.GetLoopInfoFor(merge_offset).assignments();
if (reg.is_parameter()) {
if (!assignments.ContainsParameter(reg.ToParameterIndex())) return;
} else {
DCHECK(compilation_unit.bytecode_analysis.GetInLivenessFor(merge_offset)
->RegisterIsLive(reg.index()));
DCHECK(
analysis.GetInLivenessFor(merge_offset)->RegisterIsLive(reg.index()));
if (!assignments.ContainsLocal(reg.index())) return;
}
DCHECK(value->Is<Phi>());
......
......@@ -294,7 +294,7 @@ void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
__ RecordComment("Materialize bytecode array and offset");
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
compilation_unit->bytecode.object());
compilation_unit->bytecode().object());
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
Smi::FromInt(deopt_bytecode_position +
(BytecodeArray::kHeaderSize - kHeapObjectTag)));
......
......@@ -9,7 +9,7 @@
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/compiler/backend/instruction.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
......
......@@ -7,7 +7,6 @@
#include "src/codegen/reglist.h"
#include "src/compiler/backend/instruction.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
......@@ -16,6 +15,7 @@ namespace v8 {
namespace internal {
namespace maglev {
class MaglevCompilationUnit;
class MaglevPrintingVisitor;
class MergePointRegisterState;
......
......@@ -6,7 +6,7 @@
#define V8_MAGLEV_MAGLEV_REGISTER_FRAME_ARRAY_H_
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/zone/zone.h"
namespace v8 {
......
......@@ -5,11 +5,8 @@
#include "src/maglev/maglev.h"
#include "src/common/globals.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compiler.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/shared-function-info-inl.h"
namespace v8 {
namespace internal {
......@@ -17,22 +14,10 @@ namespace internal {
// Synchronous (non-concurrent) Maglev entry point: builds a
// MaglevCompilationInfo for {function}, compiles its toplevel unit, and
// returns the generated code.
// NOTE(review): the superseded body — manual broker/zone/dependencies setup
// plus `ToCodeT(compiler.Compile(), isolate)` — was interleaved here from the
// old version and has been dropped; that setup now lives inside
// MaglevCompilationInfo::New.
MaybeHandle<CodeT> Maglev::Compile(Isolate* isolate,
                                   Handle<JSFunction> function) {
  DCHECK(FLAG_maglev);
  auto info = maglev::MaglevCompilationInfo::New(isolate, function);
  maglev::MaglevCompilationUnit* const unit = info->toplevel_compilation_unit();
  maglev::MaglevCompiler::Compile(unit);
  return maglev::MaglevCompiler::GenerateCode(unit);
}
} // namespace internal
......
......@@ -145,7 +145,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
// Prepare job.
{
CompilationHandleScope compilation(isolate, job->compilation_info());
CanonicalHandleScope canonical(isolate, job->compilation_info());
CanonicalHandleScopeForTurbofan canonical(isolate, job->compilation_info());
job->compilation_info()->ReopenHandlesInNewHandleScope(isolate);
const CompilationJob::Status status = job->PrepareJob(isolate);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment