Commit 0b28b6e6 authored by Jakob Gruber, committed by V8 LUCI CQ

Reland `[maglev] Finish & enable basic Maglev concurrent tierups`

This implements the last bits of basic concurrent Maglev compilation.
When jobs have been processed, schedule an interrupt to trigger codegen
and building the Code object on the main thread.

Changed since the initial version:
- Put the include behind V8_ENABLE_MAGLEV.
- Skip 18.js until we have deterministic test helpers for concurrent
  tiering.

Bug: v8:7700
Change-Id: Ibc103f097fe00f7df93a33a785939e43901f3734
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3536662
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79539}
parent bdc4f54a
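The change is easiest to follow as a producer/consumer handshake: a background compilation job finishes, sets an interrupt bit, and the main thread later notices that bit during a stack check and finalizes the code on its own stack. Below is a minimal, self-contained C++ sketch of that handshake. The names (interrupt_flags, kInstallMaglevCode, TestAndClear, FinalizeFinishedJobs) are illustrative stand-ins, not V8's actual classes or signatures.

// Hypothetical model of "request an interrupt from the background thread,
// finalize on the main thread"; not V8 code.
#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>

std::atomic<uint32_t> interrupt_flags{0};
constexpr uint32_t kInstallMaglevCode = 1u << 0;  // one bit per interrupt kind

// Main-thread helper: returns true if the bit was set, and clears it.
bool TestAndClear(uint32_t bit) {
  return interrupt_flags.fetch_and(~bit, std::memory_order_acq_rel) & bit;
}

void FinalizeFinishedJobs() {
  std::cout << "main thread: installing Maglev code\n";
}

int main() {
  // Background "compiler" thread: finish work, then request the interrupt.
  std::thread background([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));  // "compile"
    interrupt_flags.fetch_or(kInstallMaglevCode, std::memory_order_release);
  });

  // Main thread: interrupt checks normally piggyback on stack checks; here we
  // simply poll until the flag shows up, then finalize.
  while (!TestAndClear(kInstallMaglevCode)) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
  FinalizeFinishedJobs();
  background.join();
}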
......@@ -15,6 +15,10 @@
#include "src/tracing/trace-event.h"
#include "src/utils/memcopy.h"
+#ifdef V8_ENABLE_MAGLEV
+#include "src/maglev/maglev-concurrent-dispatcher.h"
+#endif  // V8_ENABLE_MAGLEV
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif // V8_ENABLE_WEBASSEMBLY
......@@ -325,6 +329,14 @@ Object StackGuard::HandleInterrupts() {
isolate_->baseline_batch_compiler()->InstallBatch();
}
+#ifdef V8_ENABLE_MAGLEV
+  if (TestAndClear(&interrupt_flags, INSTALL_MAGLEV_CODE)) {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+                 "V8.FinalizeMaglevConcurrentCompilation");
+    isolate_->maglev_concurrent_dispatcher()->FinalizeFinishedJobs();
+  }
+#endif  // V8_ENABLE_MAGLEV
if (TestAndClear(&interrupt_flags, API_INTERRUPT)) {
TRACE_EVENT0("v8.execute", "V8.InvokeApiInterruptCallbacks");
// Callbacks must be invoked outside of ExecutionAccess lock.
......
......@@ -54,7 +54,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD StackGuard final {
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 6) \
V(LOG_WASM_CODE, LogWasmCode, 7) \
-  V(WASM_CODE_GC, WasmCodeGC, 8)
+  V(WASM_CODE_GC, WasmCodeGC, 8)                \
+  V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
......
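The interrupt list above is an X-macro: each entry expands through the #define V(...) that follows into per-flag methods such as CheckInstallMaglevCode and RequestInstallMaglevCode. A reduced, stand-alone sketch of the same technique is shown below; StackGuardSketch and the two-entry list are simplifications, and the real StackGuard generates more methods per flag and guards the bits with a lock.

// Simplified stand-alone illustration of the X-macro pattern used for
// interrupt flags; not the real StackGuard.
#include <cstdint>
#include <iostream>

#define INTERRUPT_LIST(V)                       \
  V(WASM_CODE_GC, WasmCodeGC, 8)                \
  V(INSTALL_MAGLEV_CODE, InstallMaglevCode, 9)

class StackGuardSketch {
 public:
  // One enum bit per list entry.
  enum InterruptFlag : uint32_t {
#define V(NAME, Name, id) NAME = 1u << id,
    INTERRUPT_LIST(V)
#undef V
  };

  // One Check/Request pair per list entry, generated from the same list.
#define V(NAME, Name, id)                                    \
  bool Check##Name() const { return (flags_ & NAME) != 0; }  \
  void Request##Name() { flags_ |= NAME; }
  INTERRUPT_LIST(V)
#undef V

 private:
  uint32_t flags_ = 0;
};

int main() {
  StackGuardSketch guard;
  guard.RequestInstallMaglevCode();
  std::cout << guard.CheckInstallMaglevCode() << "\n";  // prints 1
}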
......@@ -59,9 +59,8 @@ class OptimizationDecision {
public:
static constexpr OptimizationDecision Maglev() {
// TODO(v8:7700): Consider using another reason here.
-    // TODO(v8:7700): Support concurrency.
     return {OptimizationReason::kHotAndStable, CodeKind::MAGLEV,
-            ConcurrencyMode::kNotConcurrent};
+            ConcurrencyMode::kConcurrent};
}
static constexpr OptimizationDecision TurbofanHotAndStable() {
return {OptimizationReason::kHotAndStable, CodeKind::TURBOFAN,
......
......@@ -63,9 +63,6 @@ class MaglevCompilationInfo final {
void set_graph(Graph* graph) { graph_ = graph; }
Graph* graph() const { return graph_; }
-  void set_codet(MaybeHandle<CodeT> codet) { codet_ = codet; }
-  MaybeHandle<CodeT> codet() const { return codet_; }
// Flag accessors (for thread-safe access to global flags).
// TODO(v8:7700): Consider caching these.
#define V(Name) \
......@@ -103,9 +100,6 @@ class MaglevCompilationInfo final {
// Produced off-thread during ExecuteJobImpl.
Graph* graph_ = nullptr;
-  // Produced during FinalizeJobImpl.
-  MaybeHandle<CodeT> codet_;
#define V(Name) const bool Name##_;
MAGLEV_COMPILATION_FLAG_LIST(V)
#undef V
......
......@@ -106,9 +106,12 @@ CompilationJob::Status MaglevCompilationJob::ExecuteJobImpl(
}
CompilationJob::Status MaglevCompilationJob::FinalizeJobImpl(Isolate* isolate) {
-  info()->set_codet(maglev::MaglevCompiler::GenerateCode(
-      info()->toplevel_compilation_unit()));
-  // TODO(v8:7700): Actual return codes.
+  Handle<CodeT> codet;
+  if (!maglev::MaglevCompiler::GenerateCode(info()->toplevel_compilation_unit())
+           .ToHandle(&codet)) {
+    return CompilationJob::FAILED;
+  }
+  info()->function()->set_code(*codet);
return CompilationJob::SUCCEEDED;
}
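GenerateCode now reports failure through a MaybeHandle<CodeT>, and FinalizeJobImpl bails out with FAILED when ToHandle finds the handle empty. The stand-alone analogue below uses std::optional instead of V8's MaybeHandle purely to show that control flow; GenerateCode and FinalizeJob here are toy stand-ins.

// Toy analogue of the MaybeHandle / ToHandle control flow; not V8's types.
#include <iostream>
#include <optional>
#include <string>

enum class Status { SUCCEEDED, FAILED };

// Stands in for MaglevCompiler::GenerateCode returning MaybeHandle<CodeT>.
std::optional<std::string> GenerateCode(bool ok) {
  if (!ok) return std::nullopt;  // empty handle: codegen bailed out
  return std::string("<code object>");
}

Status FinalizeJob(bool ok) {
  std::string code;
  // Mirrors: if (!GenerateCode(...).ToHandle(&codet)) return FAILED;
  if (auto maybe = GenerateCode(ok)) {
    code = *maybe;
  } else {
    return Status::FAILED;
  }
  std::cout << "installing " << code << " on the function\n";
  return Status::SUCCEEDED;
}

int main() {
  std::cout << (FinalizeJob(true) == Status::SUCCEEDED) << "\n";  // 1
  std::cout << (FinalizeJob(false) == Status::FAILED) << "\n";    // 1
}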
......@@ -132,8 +135,7 @@ class MaglevConcurrentDispatcher::JobTask final : public v8::JobTask {
CHECK_EQ(status, CompilationJob::SUCCEEDED);
outgoing_queue()->Enqueue(std::move(job));
}
-    // TODO(v8:7700):
-    // isolate_->stack_guard()->RequestInstallMaglevCode();
+    isolate()->stack_guard()->RequestInstallMaglevCode();
}
size_t GetMaxConcurrency(size_t) const override {
......@@ -178,6 +180,7 @@ void MaglevConcurrentDispatcher::EnqueueJob(
}
void MaglevConcurrentDispatcher::FinalizeFinishedJobs() {
+  HandleScope handle_scope(isolate_);
while (!outgoing_queue_.IsEmpty()) {
std::unique_ptr<MaglevCompilationJob> job;
outgoing_queue_.Dequeue(&job);
......
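FinalizeFinishedJobs drains the dispatcher's outgoing queue on the main thread inside a single HandleScope, dequeuing jobs until the queue is empty. Below is a minimal sketch of that consumer loop using a mutex-guarded queue; LockedQueue and Job are simplified stand-ins for V8's locked queue and MaglevCompilationJob, and the HandleScope itself is omitted because it has no stand-alone equivalent.

// Hypothetical consumer-loop sketch; V8 uses its own LockedQueue and
// HandleScope rather than these stand-ins.
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>

struct Job {
  std::string function_name;
  void Finalize() { std::cout << "finalized " << function_name << "\n"; }
};

class LockedQueue {
 public:
  void Enqueue(std::unique_ptr<Job> job) {
    std::lock_guard<std::mutex> lock(mu_);
    queue_.push(std::move(job));
  }
  // Returns nullptr when the queue is empty.
  std::unique_ptr<Job> Dequeue() {
    std::lock_guard<std::mutex> lock(mu_);
    if (queue_.empty()) return nullptr;
    std::unique_ptr<Job> job = std::move(queue_.front());
    queue_.pop();
    return job;
  }

 private:
  std::mutex mu_;
  std::queue<std::unique_ptr<Job>> queue_;
};

// Analogous to MaglevConcurrentDispatcher::FinalizeFinishedJobs(): drain
// everything the background threads have produced so far.
void FinalizeFinishedJobs(LockedQueue& outgoing) {
  while (std::unique_ptr<Job> job = outgoing.Dequeue()) {
    job->Finalize();
  }
}

int main() {
  LockedQueue outgoing;
  outgoing.Enqueue(std::make_unique<Job>(Job{"f"}));
  outgoing.Enqueue(std::make_unique<Job>(Job{"g"}));
  FinalizeFinishedJobs(outgoing);
}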
......@@ -21,6 +21,13 @@ namespace maglev {
class MaglevCompilationInfo;
+// TODO(v8:7700): While basic infrastructure now exists, there are many TODOs
+// that should still be addressed soon:
+// - Full tracing support through --trace-opt.
+// - Concurrent codegen.
+// - Concurrent Code object creation (optional?).
+// - Test support for concurrency (see %FinalizeOptimization).
// Exports needed functionality without exposing implementation details.
class ExportedMaglevCompilationInfo final {
public:
......
......@@ -280,6 +280,10 @@
# BUG(v8:12645)
'shared-memory/shared-struct-workers': [SKIP],
'shared-memory/shared-struct-atomics-workers': [SKIP],
+  # Needs deterministic test helpers for concurrent maglev tiering.
+  # TODO(jgruber,v8:7700): Implement ASAP.
+  'maglev/18': [SKIP],
}], # ALWAYS
##############################################################################
......