Commit 406bcd69 authored by Jakob Gruber, committed by V8 LUCI CQ

[maglev] Finish & enable basic Maglev concurrent tierups

This implements the last bits of basic concurrent Maglev compilation.
Once jobs have been processed on the background thread, an interrupt is
scheduled that triggers codegen and building of the Code object on the
main thread.

Bug: v8:7700
Change-Id: I348ade4777ddddf7c3a6b0575d9f51e5fa00c9fb
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3528494
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79516}
parent 27708001
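For orientation before the diff, here is a minimal standalone sketch of the hand-off pattern described above. It is not V8 code: the queue, flag, and function names (BackgroundCompile, HandleInterrupts, install_code_interrupt) are illustrative stand-ins for the dispatcher's job queues, the INSTALL_MAGLEV_CODE stack-guard flag, and StackGuard::HandleInterrupts. A background thread finishes jobs, raises a flag, and the main thread later drains the queue and installs the results.

// Standalone sketch (not V8 code) of the hand-off pattern described above:
// a background thread processes compilation jobs, enqueues the results, and
// raises an "interrupt" flag; the main thread notices the flag at a safepoint
// and finalizes (installs) the finished jobs itself.
#include <atomic>
#include <chrono>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <vector>

struct Job {
  std::string function_name;   // what to "compile"
  std::string generated_code;  // filled in by the background thread
};

std::queue<Job> outgoing_queue;  // finished jobs, awaiting finalization
std::mutex queue_mutex;          // protects outgoing_queue
std::atomic<bool> install_code_interrupt{false};  // stand-in for INSTALL_MAGLEV_CODE

void BackgroundCompile(std::vector<std::string> functions) {
  for (const auto& name : functions) {
    Job job{name, "<code for " + name + ">"};  // pretend compilation
    {
      std::lock_guard<std::mutex> lock(queue_mutex);
      outgoing_queue.push(std::move(job));
    }
    // Analogous to requesting the install interrupt: ask the main thread to
    // finalize at its next interrupt check.
    install_code_interrupt.store(true, std::memory_order_release);
  }
}

// Called by the main thread at a safepoint, in the spirit of
// StackGuard::HandleInterrupts in the diff below.
void HandleInterrupts() {
  if (install_code_interrupt.exchange(false, std::memory_order_acq_rel)) {
    std::lock_guard<std::mutex> lock(queue_mutex);
    while (!outgoing_queue.empty()) {
      Job job = std::move(outgoing_queue.front());
      outgoing_queue.pop();
      // "Finalization": build/install the code object on the main thread.
      std::cout << "installing " << job.generated_code << "\n";
    }
  }
}

int main() {
  std::thread worker(BackgroundCompile,
                     std::vector<std::string>{"f", "g", "h"});
  // The main thread keeps running and periodically checks for interrupts.
  for (int i = 0; i < 100; ++i) {
    HandleInterrupts();
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  worker.join();
  HandleInterrupts();  // drain anything that finished after the last check
  return 0;
}

The essential property mirrored here is that finalization never runs on the background thread; it is deferred to the main thread's next interrupt check.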
@@ -10,6 +10,7 @@
 #include "src/execution/isolate.h"
 #include "src/execution/simulator.h"
 #include "src/logging/counters.h"
+#include "src/maglev/maglev-concurrent-dispatcher.h"
 #include "src/objects/backing-store.h"
 #include "src/roots/roots-inl.h"
 #include "src/tracing/trace-event.h"
@@ -325,6 +326,14 @@ Object StackGuard::HandleInterrupts() {
     isolate_->baseline_batch_compiler()->InstallBatch();
   }
+#ifdef V8_ENABLE_MAGLEV
+  if (TestAndClear(&interrupt_flags, INSTALL_MAGLEV_CODE)) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+                 "V8.FinalizeMaglevConcurrentCompilation");
+    isolate_->maglev_concurrent_dispatcher()->FinalizeFinishedJobs();
+  }
+#endif  // V8_ENABLE_MAGLEV
   if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
     TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
     // Callbacks must be invoked outside of ExecutionAccess lock.
...
@@ -54,7 +54,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
   V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \
   V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \
   V(LOG_WASM_CODE, LogWasmCode, 7) \
-  V(WASM_CODE_GC, WasmCodeGC, 8)
+  V(WASM_CODE_GC, WasmCodeGC, 8) \
+  V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9)
 #define V(NAME, Name, id) \
   inline bool Check##Name() { return CheckInterrupt(NAME); } \
...
@@ -59,9 +59,8 @@ class OptimizationDecision {
  public:
   static constexpr OptimizationDecision Maglev() {
     // TODO(v8:7700): Consider using another reason here.
-    // TODO(v8:7700): Support concurrency.
     return {OptimizationReason::kHotAndStable, CodeKind::MAGLEV,
-            ConcurrencyMode::kNotConcurrent};
+            ConcurrencyMode::kConcurrent};
   }
   static constexpr OptimizationDecision TurbofanHotAndStable() {
     return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
...
@@ -63,9 +63,6 @@ class MaglevCompilationInfo final {
   void set_graph(Graph* graph) { graph_ = graph; }
   Graph* graph() const { return graph_; }
-  void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
-  MaybeHandle<CodeT> codet() const { return codet_; }
   // Flag accessors (for thread-safe access to global flags).
   // TODO(v8:7700): Consider caching these.
 #define V(Name) \
@@ -103,9 +100,6 @@
   // Produced off-thread during ExecuteJobImpl.
   Graph* graph_ = nullptr;
-  // Produced during FinalizeJobImpl.
-  MaybeHandle<CodeT> codet_;
 #define V(Name) const bool Name##_;
   MAGLEV_COMPILATION_FLAG_LIST(V)
 #undef V
...
@@ -106,9 +106,12 @@ CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
 }
 CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
-  info()->set_codet(maglev::MaglevCompiler::GenerateCode(
-      info()->toplevel_compilation_unit()));
-  // TODO(v8:7700): Actual return codes.
+  Handle<CodeT> codet;
+  if (!maglev::MaglevCompiler::GenerateCode(info()->toplevel_compilation_unit())
+           .ToHandle(&codet)) {
+    return CompilationJob::FAILED;
+  }
+  info()->function()->set_code(*codet);
   return CompilationJob::SUCCEEDED;
 }
@@ -132,8 +135,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
       CHECK_EQ(status, CompilationJob::SUCCEEDED);
       outgoing_queue()->Enqueue(std::move(job));
     }
-    // TODO(v8:7700):
-    // isolate_->stack_guard()->RequestInstallMaglevCode();
+    isolate()->stack_guard()->RequestInstallMaglevCode();
   }
   size_t GetMaxConcurrency(size_t) const override {
@@ -178,6 +180,7 @@ void MaglevConcurrentDispatcher::EnqueueJob(
 }
 void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+  HandleScope handle_scope(isolate_);
   while (!outgoing_queue_.IsEmpty()) {
     std::unique_ptr<MaglevCompilationJob> job;
     outgoing_queue_.Dequeue(&job);
...
@@ -21,6 +21,13 @@ namespace maglev {
 class MaglevCompilationInfo;
+// TODO(v8:7700): While basic infrastructure now exists, there are many TODOs
+// that should still be addressed soon:
+// - Full tracing support through --trace-opt.
+// - Concurrent codegen.
+// - Concurrent Code object creation (optional?).
+// - Test support for concurrency (see %FinalizeOptimization).
 // Exports needed functionality without exposing implementation details.
 class ExportedMaglevCompilationInfo final {
  public:
...