Commit a5a87e1e authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Initial Maglev commit

Maglev is a mid-tier optimising compiler designed mainly for compilation
speed that can still generate good code for straightforward JS.

This initial commit is an MVP for Maglev which can compile and run some
very simple code, and sets up a framework that we can build upon.

Design:
https://docs.google.com/document/d/13CwgSL4yawxuYg3iNlM-4ZPCB8RgJya6b8H_E2F-Aek/edit#

Bug: v8:7700
Change-Id: I5ae074ae099126c2c0d50864ac9b3d6fa5c9e85a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3483664
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79247}
parent 4be0a348
......@@ -198,6 +198,10 @@ declare_args() {
# Sets -dV8_EXTERNAL_CODE_SPACE
v8_enable_external_code_space = ""
# Enable the Maglev compiler.
# Sets -dV8_ENABLE_MAGLEV
v8_enable_maglev = ""
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
......@@ -425,6 +429,9 @@ if (v8_enable_external_code_space == "") {
(target_os != "android" && target_os != "fuchsia" &&
v8_current_cpu == "arm64"))
}
if (v8_enable_maglev == "") {
v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
}
if (v8_enable_single_generation == "") {
v8_enable_single_generation = v8_disable_write_barriers
}
......@@ -956,6 +963,9 @@ config("features") {
if (v8_enable_external_code_space) {
defines += [ "V8_EXTERNAL_CODE_SPACE" ]
}
if (v8_enable_maglev) {
defines += [ "V8_ENABLE_MAGLEV" ]
}
if (v8_enable_swiss_name_dictionary) {
defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
......@@ -3452,6 +3462,28 @@ v8_header_set("v8_internal_headers") {
sources -= [ "//base/trace_event/common/trace_event_common.h" ]
}
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-basic-block.h",
"src/maglev/maglev-code-gen-state.h",
"src/maglev/maglev-code-generator.h",
"src/maglev/maglev-compilation-data.h",
"src/maglev/maglev-compiler.h",
"src/maglev/maglev-graph-builder.h",
"src/maglev/maglev-graph-labeller.h",
"src/maglev/maglev-graph-printer.h",
"src/maglev/maglev-graph-processor.h",
"src/maglev/maglev-graph.h",
"src/maglev/maglev-interpreter-frame-state.h",
"src/maglev/maglev-ir.h",
"src/maglev/maglev-regalloc-data.h",
"src/maglev/maglev-regalloc.h",
"src/maglev/maglev-register-frame-array.h",
"src/maglev/maglev-vreg-allocator.h",
"src/maglev/maglev.h",
]
}
if (v8_enable_webassembly) {
sources += [
"src/asmjs/asm-js.h",
......@@ -4453,6 +4485,19 @@ v8_source_set("v8_base_without_compiler") {
"src/zone/zone.cc",
]
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-code-generator.cc",
"src/maglev/maglev-compilation-data.cc",
"src/maglev/maglev-compiler.cc",
"src/maglev/maglev-graph-builder.cc",
"src/maglev/maglev-graph-printer.cc",
"src/maglev/maglev-ir.cc",
"src/maglev/maglev-regalloc.cc",
"src/maglev/maglev.cc",
]
}
if (v8_enable_webassembly) {
sources += [ ### gcmole(all) ###
"src/asmjs/asm-js.cc",
......
......@@ -51,6 +51,8 @@ include_rules = [
"+src/interpreter/interpreter.h",
"+src/interpreter/interpreter-generator.h",
"+src/interpreter/setup-interpreter.h",
"-src/maglev",
"+src/maglev/maglev.h",
"-src/regexp",
"+src/regexp/regexp.h",
"+src/regexp/regexp-flags.h",
......
......@@ -160,6 +160,15 @@ class ThreadedListBase final : public BaseClass {
return *this;
}
bool is_null() { return entry_ == nullptr; }
void InsertBefore(T* value) {
T* old_entry_value = *entry_;
*entry_ = value;
entry_ = TLTraits::next(value);
*entry_ = old_entry_value;
}
Iterator() : entry_(nullptr) {}
private:
......@@ -178,6 +187,10 @@ class ThreadedListBase final : public BaseClass {
using reference = const value_type;
using pointer = const value_type*;
// Allow implicit conversion to const iterator.
// NOLINTNEXTLINE
ConstIterator(Iterator& iterator) : entry_(iterator.entry_) {}
public:
ConstIterator& operator++() {
entry_ = TLTraits::next(*entry_);
......
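The new Iterator::InsertBefore above splices a value in front of the iterator's current element by rewriting the list slot the iterator points at. A minimal usage sketch, assuming v8::base::ThreadedList with the default traits (Item and Demo are hypothetical names):

// Sketch only: Item satisfies the default ThreadedListTraits by
// exposing a next() slot.
struct Item {
  Item* next_ = nullptr;
  Item** next() { return &next_; }
};

void Demo(v8::base::ThreadedList<Item>* list, Item* new_item) {
  auto it = list->begin();
  ++it;                       // *it is the second element
  it.InsertBefore(new_item);  // new_item becomes the second element
  // *it still designates the original element, now reached through
  // new_item's next pointer, so iteration continues without skipping.
}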
......@@ -952,6 +952,13 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
TailCallRuntimeIfMarkerEquals(
masm, optimization_marker,
OptimizationMarker::kCompileMaglev_NotConcurrent,
Runtime::kCompileMaglev_NotConcurrent);
TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
OptimizationMarker::kCompileMaglev_Concurrent,
Runtime::kCompileMaglev_Concurrent);
TailCallRuntimeIfMarkerEquals(
masm, optimization_marker,
OptimizationMarker::kCompileTurbofan_NotConcurrent,
......
......@@ -77,6 +77,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool Compile(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
static MaybeHandle<SharedFunctionInfo> CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
IsCompiledScope* is_compiled_scope);
static bool CompileSharedWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag,
......@@ -84,29 +88,24 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
static bool CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
ClearExceptionFlag flag,
IsCompiledScope* is_compiled_scope);
static bool CompileMaglev(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode,
IsCompiledScope* is_compiled_scope);
static void CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode, CodeKind code_kind);
static MaybeHandle<SharedFunctionInfo> CompileToplevel(
ParseInfo* parse_info, Handle<Script> script, Isolate* isolate,
IsCompiledScope* is_compiled_scope);
static void LogFunctionCompilation(Isolate* isolate,
CodeEventListener::LogEventsAndTags tag,
Handle<Script> script,
Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> feedback_vector,
Handle<AbstractCode> abstract_code,
CodeKind kind, double time_taken_ms);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
// Collect source positions for a function that has already been compiled to
// bytecode, but for which source positions were not collected (e.g. because
// they were not immediately needed).
static bool CollectSourcePositions(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo>
CompileForLiveEdit(ParseInfo* parse_info, Handle<Script> script,
Isolate* isolate);
// Finalize and install code from previously run background compile task.
static bool FinalizeBackgroundCompileTask(BackgroundCompileTask* task,
Isolate* isolate,
......
......@@ -99,6 +99,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_JS_FUNCTION:
break;
case CodeKind::BASELINE:
case CodeKind::MAGLEV:
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::REGEXP:
UNREACHABLE();
......
......@@ -1651,10 +1651,12 @@ using FileAndLine = std::pair<const char*, int>;
enum class OptimizationMarker : int32_t {
// These values are set so that it is easy to check if there is a marker where
// some processing needs to be done.
kNone = 0b00,
kInOptimizationQueue = 0b01,
kCompileTurbofan_NotConcurrent = 0b10,
kCompileTurbofan_Concurrent = 0b11,
kNone = 0b000,
kInOptimizationQueue = 0b001,
kCompileMaglev_NotConcurrent = 0b010,
kCompileMaglev_Concurrent = 0b011,
kCompileTurbofan_NotConcurrent = 0b100,
kCompileTurbofan_Concurrent = 0b101,
kLastOptimizationMarker = kCompileTurbofan_Concurrent,
};
// For kNone or kInOptimizationQueue we don't need any special processing.
......@@ -1664,18 +1666,18 @@ STATIC_ASSERT(static_cast<int>(OptimizationMarker::kNone) == 0b00 &&
static_cast<int>(OptimizationMarker::kInOptimizationQueue) ==
0b01);
STATIC_ASSERT(static_cast<int>(OptimizationMarker::kLastOptimizationMarker) <=
0b11);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b10;
inline bool IsInOptimizationQueueMarker(OptimizationMarker marker) {
return marker == OptimizationMarker::kInOptimizationQueue;
}
0b111);
static constexpr uint32_t kNoneOrInOptimizationQueueMask = 0b110;
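With the widened three-bit encoding, the fast-path test stays a single mask: only kNone (0b000) and kInOptimizationQueue (0b001) have all bits of kNoneOrInOptimizationQueueMask (0b110) clear. A worked sketch (MarkerNeedsProcessing is a hypothetical name):

inline bool MarkerNeedsProcessing(OptimizationMarker marker) {
  // kNone (0b000) and kInOptimizationQueue (0b001) mask to zero; every
  // kCompileMaglev_*/kCompileTurbofan_* value (0b010..0b101) does not.
  return (static_cast<uint32_t>(marker) & kNoneOrInOptimizationQueueMask) != 0;
}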
inline std::ostream& operator<<(std::ostream& os,
const OptimizationMarker& marker) {
switch (marker) {
case OptimizationMarker::kNone:
return os << "OptimizationMarker::kNone";
case OptimizationMarker::kCompileMaglev_NotConcurrent:
return os << "OptimizationMarker::kCompileMaglev_NotConcurrent";
case OptimizationMarker::kCompileMaglev_Concurrent:
return os << "OptimizationMarker::kCompileMaglev_Concurrent";
case OptimizationMarker::kCompileTurbofan_NotConcurrent:
return os << "OptimizationMarker::kCompileTurbofan_NotConcurrent";
case OptimizationMarker::kCompileTurbofan_Concurrent:
......@@ -1696,14 +1698,24 @@ inline std::ostream& operator<<(std::ostream& os,
case SpeculationMode::kDisallowSpeculation:
return os << "SpeculationMode::kDisallowSpeculation";
}
UNREACHABLE();
return os;
}
enum class BlockingBehavior { kBlock, kDontBlock };
enum class ConcurrencyMode { kNotConcurrent, kConcurrent };
inline const char* ToString(ConcurrencyMode mode) {
switch (mode) {
case ConcurrencyMode::kNotConcurrent:
return "ConcurrencyMode::kNotConcurrent";
case ConcurrencyMode::kConcurrent:
return "ConcurrencyMode::kConcurrent";
}
}
inline std::ostream& operator<<(std::ostream& os, ConcurrencyMode mode) {
return os << ToString(mode);
}
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
C(CEntryFP, c_entry_fp) \
......
......@@ -140,6 +140,9 @@ class V8_EXPORT_PRIVATE INSTRUCTION_OPERAND_ALIGN InstructionOperand {
// APIs to aid debugging. For general-stream APIs, use operator<<.
void Print() const;
bool operator==(InstructionOperand& other) const { return Equals(other); }
bool operator!=(InstructionOperand& other) const { return !Equals(other); }
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
......
......@@ -773,6 +773,12 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
return header_to_info_.find(header_offset)->second;
}
const LoopInfo* BytecodeAnalysis::TryGetLoopInfoFor(int header_offset) const {
auto it = header_to_info_.find(header_offset);
if (it == header_to_info_.end()) return nullptr;
return &it->second;
}
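Unlike GetLoopInfoFor, which requires {header_offset} to be a known loop header, the Try variant returns null so callers can probe. A usage sketch, with analysis and offset assumed in scope:

if (const LoopInfo* info = analysis.TryGetLoopInfoFor(offset)) {
  // offset is a loop header; inspect *info.
} else {
  // offset is not a loop header.
}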
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
if (!analyze_liveness_) return nullptr;
......
......@@ -110,6 +110,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
int GetLoopOffsetFor(int offset) const;
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
// Try to get the loop info of the loop header at {header_offset}, returning
// null if there isn't any.
const LoopInfo* TryGetLoopInfoFor(int header_offset) const;
const ZoneMap<int, LoopInfo>& GetLoopInfos() const { return header_to_info_; }
// Get the top-level resume jump targets.
const ZoneVector<ResumeJumpTarget>& resume_jump_targets() const {
......
......@@ -19,6 +19,38 @@ namespace compiler {
class BytecodeLivenessState : public ZoneObject {
public:
class Iterator {
public:
int operator*() const {
// Subtract one to compensate for the accumulator at the start of the
// bit vector.
return *it_ - 1;
}
void operator++() { return ++it_; }
bool operator!=(const Iterator& other) const { return it_ != other.it_; }
private:
static constexpr struct StartTag {
} kStartTag = {};
static constexpr struct EndTag {
} kEndTag = {};
explicit Iterator(const BytecodeLivenessState& liveness, StartTag)
: it_(liveness.bit_vector_.begin()) {
// If we're not at the end, and the current value is the accumulator, skip
// over it.
if (it_ != liveness.bit_vector_.end() && *it_ == 0) {
++it_;
}
}
explicit Iterator(const BytecodeLivenessState& liveness, EndTag)
: it_(liveness.bit_vector_.end()) {}
BitVector::Iterator it_;
friend class BytecodeLivenessState;
};
BytecodeLivenessState(int register_count, Zone* zone)
: bit_vector_(register_count + 1, zone) {}
BytecodeLivenessState(const BytecodeLivenessState&) = delete;
......@@ -71,6 +103,13 @@ class BytecodeLivenessState : public ZoneObject {
int register_count() const { return bit_vector_.length() - 1; }
// Number of live values, including the accumulator.
int live_value_count() const { return bit_vector_.Count(); }
Iterator begin() const { return Iterator(*this, Iterator::kStartTag); }
Iterator end() const { return Iterator(*this, Iterator::kEndTag); }
private:
BitVector bit_vector_;
};
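Bit 0 of the underlying BitVector holds the accumulator, so begin() skips it and operator* subtracts one; range-for over the state therefore yields plain register indices. A sketch (PrintLiveRegisters is hypothetical, and the AccumulatorIsLive() accessor is assumed from the existing class):

void PrintLiveRegisters(const BytecodeLivenessState& liveness) {
  for (int reg_index : liveness) {
    PrintF("r%d is live\n", reg_index);  // the accumulator bit is skipped
  }
  if (liveness.AccumulatorIsLive()) PrintF("accumulator is live\n");
}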
......
......@@ -673,6 +673,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
return BUILTIN;
case CodeKind::TURBOFAN:
case CodeKind::MAGLEV:
return OPTIMIZED;
case CodeKind::BASELINE:
return Type::BASELINE;
......
......@@ -513,6 +513,20 @@ DEFINE_BOOL(future, FUTURE_BOOL,
"Implies all staged features that we want to ship in the "
"not-too-far future")
#ifdef V8_ENABLE_MAGLEV
#define V8_ENABLE_MAGLEV_BOOL true
#else
#define V8_ENABLE_MAGLEV_BOOL false
#endif // V8_ENABLE_MAGLEV
DEFINE_BOOL(maglev, V8_ENABLE_MAGLEV_BOOL,
"enable the maglev optimizing compiler")
DEFINE_STRING(maglev_filter, "*", "optimization filter for the maglev compiler")
DEFINE_BOOL(maglev_break_on_entry, false, "insert an int3 on maglev entries")
DEFINE_BOOL(print_maglev_graph, false, "print maglev graph")
DEFINE_BOOL(print_maglev_code, false, "print maglev code")
DEFINE_BOOL(trace_maglev_regalloc, false, "trace maglev register allocation")
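(In a build configured with v8_enable_maglev = true, these can be exercised from the shell, e.g. d8 --maglev --print-maglev-graph --print-maglev-code test.js; each DEFINE_* above surfaces as the correspondingly named command-line flag.)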
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
......@@ -543,6 +557,8 @@ DEFINE_IMPLICATION(jitless, regexp_interpret_all)
// No Sparkplug compilation.
DEFINE_NEG_IMPLICATION(jitless, sparkplug)
DEFINE_NEG_IMPLICATION(jitless, always_sparkplug)
// No Maglev compilation.
DEFINE_NEG_IMPLICATION(jitless, maglev)
#endif
#ifndef V8_TARGET_ARCH_ARM
......
......@@ -53,14 +53,6 @@ void BytecodeArrayIterator::ApplyDebugBreak() {
*cursor = interpreter::Bytecodes::ToByte(debugbreak);
}
int BytecodeArrayIterator::current_bytecode_size() const {
return prefix_size_ + current_bytecode_size_without_prefix();
}
int BytecodeArrayIterator::current_bytecode_size_without_prefix() const {
return Bytecodes::Size(current_bytecode(), current_operand_scale());
}
uint32_t BytecodeArrayIterator::GetUnsignedOperand(
int operand_index, OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
......
......@@ -77,7 +77,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
BytecodeArrayIterator& operator=(const BytecodeArrayIterator&) = delete;
inline void Advance() {
cursor_ += Bytecodes::Size(current_bytecode(), current_operand_scale());
cursor_ += current_bytecode_size_without_prefix();
UpdateOperandScale();
}
void SetOffset(int offset);
......@@ -92,11 +92,16 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
return current_bytecode;
}
int current_bytecode_size() const;
int current_bytecode_size_without_prefix() const;
int current_bytecode_size() const {
return prefix_size_ + current_bytecode_size_without_prefix();
}
int current_bytecode_size_without_prefix() const {
return Bytecodes::Size(current_bytecode(), current_operand_scale());
}
int current_offset() const {
return static_cast<int>(cursor_ - start_ - prefix_size_);
}
int next_offset() const { return current_offset() + current_bytecode_size(); }
OperandScale current_operand_scale() const { return operand_scale_; }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
......
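Moving the size accessors into the header makes Advance a fully inlined cursor bump. Typical iteration, as a sketch (bytecode_array assumed in scope, and assuming the iterator's usual done() accessor):

interpreter::BytecodeArrayIterator it(bytecode_array);
for (; !it.done(); it.Advance()) {
  interpreter::Bytecode bytecode = it.current_bytecode();
  int offset = it.current_offset();  // excludes any Wide/ExtraWide prefix
  USE(bytecode, offset);
}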
......@@ -2152,6 +2152,8 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
case CodeKind::BASELINE:
case CodeKind::MAGLEV:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
return; // We log it later by walking the dispatch table.
case CodeKind::FOR_TESTING:
......
include_rules = [
# Allow Maglev to depend on TurboFan data structures.
# TODO(v8:7700): Clean up these dependencies by extracting common code to a
# separate directory.
"+src/compiler",
]
leszeks@chromium.org
jgruber@chromium.org
verwaest@chromium.org
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
#define V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
#include <vector>
#include "src/codegen/label.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
using NodeIterator = Node::List::Iterator;
using NodeConstIterator = Node::List::Iterator;
class BasicBlock {
public:
explicit BasicBlock(MergePointInterpreterFrameState* state)
: control_node_(nullptr), state_(state) {}
uint32_t first_id() const {
if (has_phi()) return phis()->first()->id();
return nodes_.is_empty() ? control_node()->id() : nodes_.first()->id();
}
uint32_t FirstNonGapMoveId() const {
if (has_phi()) return phis()->first()->id();
if (!nodes_.is_empty()) {
for (const Node* node : nodes_) {
if (node->Is<GapMove>()) continue;
return node->id();
}
}
return control_node()->id();
}
Node::List& nodes() { return nodes_; }
ControlNode* control_node() const { return control_node_; }
void set_control_node(ControlNode* control_node) {
DCHECK_NULL(control_node_);
control_node_ = control_node;
}
bool has_phi() const { return has_state() && state_->has_phi(); }
bool is_empty_block() const { return is_empty_block_; }
BasicBlock* empty_block_predecessor() const {
DCHECK(is_empty_block());
return empty_block_predecessor_;
}
void set_empty_block_predecessor(BasicBlock* predecessor) {
DCHECK(nodes_.is_empty());
DCHECK(control_node()->Is<Jump>());
DCHECK_NULL(state_);
is_empty_block_ = true;
empty_block_predecessor_ = predecessor;
}
Phi::List* phis() const {
DCHECK(has_phi());
return state_->phis();
}
BasicBlock* predecessor_at(int i) const {
DCHECK_NOT_NULL(state_);
return state_->predecessor_at(i);
}
int predecessor_id() const {
return control_node()->Cast<UnconditionalControlNode>()->predecessor_id();
}
void set_predecessor_id(int id) {
control_node()->Cast<UnconditionalControlNode>()->set_predecessor_id(id);
}
Label* label() { return &label_; }
MergePointInterpreterFrameState* state() const {
DCHECK(has_state());
return state_;
}
bool has_state() const { return state_ != nullptr && !is_empty_block(); }
private:
bool is_empty_block_ = false;
Node::List nodes_;
ControlNode* control_node_;
union {
MergePointInterpreterFrameState* state_;
BasicBlock* empty_block_predecessor_;
};
Label label_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_BASIC_BLOCK_H_
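In BasicBlock above, state_ and empty_block_predecessor_ share storage, discriminated by is_empty_block_ (an empty block is a state-less Jump, so its slot is reused to remember the predecessor). A sketch of the intended access pattern (VisitBlock is hypothetical):

void VisitBlock(BasicBlock* block) {
  if (block->is_empty_block()) {
    // The union holds the predecessor; the block has no merge state.
    BasicBlock* pred = block->empty_block_predecessor();
    USE(pred);
  } else if (block->has_state()) {
    // The union holds the merge-point frame state.
    MergePointInterpreterFrameState* state = block->state();
    USE(state);
  }
}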
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
#define V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
#include "src/codegen/assembler.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevCodeGenState {
public:
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) = 0;
Label deferred_code_label;
Label return_label;
};
MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
SafepointTableBuilder* safepoint_table_builder)
: compilation_unit_(compilation_unit),
safepoint_table_builder_(safepoint_table_builder),
masm_(isolate(), CodeObjectRequired::kNo) {}
void SetVregSlots(int slots) { vreg_slots_ = slots; }
void PushDeferredCode(DeferredCodeInfo* deferred_code) {
deferred_code_.push_back(deferred_code);
}
void EmitDeferredCode() {
for (auto& deferred_code : deferred_code_) {
masm()->RecordComment("-- Deferred block");
masm()->bind(&deferred_code->deferred_code_label);
deferred_code->Generate(this, &deferred_code->return_label);
masm()->int3();
}
}
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
}
Isolate* isolate() const { return compilation_unit_->isolate(); }
int parameter_count() const { return compilation_unit_->parameter_count(); }
int register_count() const { return compilation_unit_->register_count(); }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
return compilation_unit_->bytecode_analysis;
}
compiler::JSHeapBroker* broker() const { return compilation_unit_->broker(); }
const compiler::BytecodeArrayRef& bytecode() const {
return compilation_unit_->bytecode;
}
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
MacroAssembler* masm() { return &masm_; }
int vreg_slots() const { return vreg_slots_; }
SafepointTableBuilder* safepoint_table_builder() const {
return safepoint_table_builder_;
}
MaglevCompilationUnit* compilation_unit() const { return compilation_unit_; }
private:
MaglevCompilationUnit* const compilation_unit_;
SafepointTableBuilder* const safepoint_table_builder_;
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
int vreg_slots_ = 0;
};
// Some helpers for codegen.
// TODO(leszeks): consider moving this to a separate header.
inline MemOperand GetStackSlot(int index) {
return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
index * kSystemPointerSize);
}
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
return GetStackSlot(operand.index());
}
inline Register ToRegister(const compiler::InstructionOperand& operand) {
return compiler::AllocatedOperand::cast(operand).GetRegister();
}
inline Register ToRegister(const ValueLocation& location) {
return ToRegister(location.operand());
}
inline MemOperand ToMemOperand(const compiler::InstructionOperand& operand) {
return GetStackSlot(compiler::AllocatedOperand::cast(operand));
}
inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline int GetSafepointIndexForStackSlot(int i) {
// Safepoint tables also contain slots for all fixed frame slots (both
// above and below the fp).
return StandardFrameConstants::kFixedSlotCount + i;
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_CODE_GEN_STATE_H_
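These helpers let the code generator treat register and stack-slot operands uniformly; a dispatch sketch in the style of the generator that follows (MoveToRegister is a hypothetical name):

inline void MoveToRegister(MacroAssembler* masm, Register dst,
                           const compiler::AllocatedOperand& src) {
  if (src.IsRegister()) {
    masm->movq(dst, ToRegister(src));
  } else {
    masm->movq(dst, GetStackSlot(src));
  }
}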
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-code-generator.h"
#include "src/codegen/code-desc.h"
#include "src/codegen/safepoint-table.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm()->
namespace {
class MaglevCodeGeneratingNodeProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
: code_gen_state_(code_gen_state) {}
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {
if (FLAG_maglev_break_on_entry) {
__ int3();
}
__ EnterFrame(StackFrame::BASELINE);
// Save arguments in frame.
// TODO(leszeks): Consider eliding this frame if we don't make any calls
// that could clobber these registers.
__ Push(kContextRegister);
__ Push(kJSFunctionRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
// Extend rsp by the size of the frame.
code_gen_state_->SetVregSlots(graph->stack_slots());
__ subq(rsp, Immediate(code_gen_state_->vreg_slots() * kSystemPointerSize));
// Initialize stack slots.
// TODO(jgruber): Update logic once the register allocator is further along.
{
ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
__ Move(rax, Immediate(0));
__ Move(rcx, Immediate(code_gen_state_->vreg_slots()));
__ leaq(rdi, GetStackSlot(code_gen_state_->vreg_slots() - 1));
__ repstosq();
}
// We don't emit proper safepoint data yet; instead, define a single
// safepoint at the end of the code object, with all-tagged stack slots.
// TODO(jgruber): Real safepoint handling.
SafepointTableBuilder::Safepoint safepoint =
safepoint_table_builder()->DefineSafepoint(masm());
for (int i = 0; i < code_gen_state_->vreg_slots(); i++) {
safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
}
}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
code_gen_state_->EmitDeferredCode();
}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- Block b" << graph_labeller()->BlockId(block);
__ RecordComment(ss.str());
}
__ bind(block->label());
}
template <typename NodeT>
void Process(NodeT* node, const ProcessingState& state) {
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- " << graph_labeller()->NodeId(node) << ": "
<< PrintNode(graph_labeller(), node);
__ RecordComment(ss.str());
}
// Emit Phi moves before visiting the control node.
if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
BasicBlock* target =
node->template Cast<UnconditionalControlNode>()->target();
if (target->has_state()) {
int predecessor_id = state.block()->predecessor_id();
__ RecordComment("-- Register merge gap moves:");
for (int index = 0; index < kAllocatableGeneralRegisterCount; ++index) {
RegisterMerge* merge;
if (LoadMergeState(target->state()->register_state()[index],
&merge)) {
compiler::AllocatedOperand source = merge->operand(predecessor_id);
Register reg = MapIndexToRegister(index);
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
}
// TODO(leszeks): Implement parallel moves.
if (source.IsStackSlot()) {
__ movq(reg, GetStackSlot(source));
} else {
__ movq(reg, ToRegister(source));
}
}
}
if (target->has_phi()) {
__ RecordComment("-- Phi gap moves:");
Phi::List* phis = target->phis();
for (Phi* phi : *phis) {
compiler::AllocatedOperand source =
compiler::AllocatedOperand::cast(
phi->input(state.block()->predecessor_id()).operand());
compiler::AllocatedOperand target =
compiler::AllocatedOperand::cast(phi->result().operand());
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << target << " (n"
<< graph_labeller()->NodeId(phi) << ")";
__ RecordComment(ss.str());
}
if (source.IsRegister()) {
Register source_reg = ToRegister(source);
if (target.IsRegister()) {
__ movq(ToRegister(target), source_reg);
} else {
__ movq(GetStackSlot(target), source_reg);
}
} else {
if (target.IsRegister()) {
__ movq(ToRegister(target), GetStackSlot(source));
} else {
__ movq(kScratchRegister, GetStackSlot(source));
__ movq(GetStackSlot(target), kScratchRegister);
}
}
}
}
} else {
__ RecordComment("-- Target has no state, must be a fallthrough");
}
}
node->GenerateCode(code_gen_state_, state);
if (std::is_base_of<ValueNode, NodeT>::value) {
ValueNode* value_node = node->template Cast<ValueNode>();
if (value_node->is_spilled()) {
if (FLAG_code_comments) __ RecordComment("-- Spill:");
compiler::AllocatedOperand source =
compiler::AllocatedOperand::cast(value_node->result().operand());
// We shouldn't spill nodes which already output to the stack.
DCHECK(!source.IsStackSlot());
__ movq(GetStackSlot(value_node->spill_slot()), ToRegister(source));
}
}
}
Isolate* isolate() const { return code_gen_state_->isolate(); }
MacroAssembler* masm() const { return code_gen_state_->masm(); }
MaglevGraphLabeller* graph_labeller() const {
return code_gen_state_->graph_labeller();
}
SafepointTableBuilder* safepoint_table_builder() const {
return code_gen_state_->safepoint_table_builder();
}
private:
MaglevCodeGenState* code_gen_state_;
};
} // namespace
class MaglevCodeGeneratorImpl final {
public:
static Handle<Code> Generate(MaglevCompilationUnit* compilation_unit,
Graph* graph) {
return MaglevCodeGeneratorImpl(compilation_unit, graph).Generate();
}
private:
MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
: safepoint_table_builder_(compilation_unit->zone()),
code_gen_state_(compilation_unit, safepoint_table_builder()),
processor_(compilation_unit, &code_gen_state_),
graph_(graph) {}
Handle<Code> Generate() {
EmitCode();
EmitMetadata();
return BuildCodeObject();
}
void EmitCode() { processor_.ProcessGraph(graph_); }
void EmitMetadata() {
// Final alignment before starting on the metadata section.
masm()->Align(Code::kMetadataAlignment);
safepoint_table_builder()->Emit(masm(),
stack_slot_count_with_fixed_frame());
}
Handle<Code> BuildCodeObject() {
CodeDesc desc;
static constexpr int kNoHandlerTableOffset = 0;
masm()->GetCode(isolate(), &desc, safepoint_table_builder(),
kNoHandlerTableOffset);
return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
.set_stack_slots(stack_slot_count_with_fixed_frame())
.Build();
}
int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
int stack_slot_count_with_fixed_frame() const {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
}
Isolate* isolate() const {
return code_gen_state_.compilation_unit()->isolate();
}
MacroAssembler* masm() { return code_gen_state_.masm(); }
SafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
SafepointTableBuilder safepoint_table_builder_;
MaglevCodeGenState code_gen_state_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;
};
// static
Handle<Code> MaglevCodeGenerator::Generate(
MaglevCompilationUnit* compilation_unit, Graph* graph) {
return MaglevCodeGeneratorImpl::Generate(compilation_unit, graph);
}
} // namespace maglev
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
#define V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
#include "src/common/globals.h"
namespace v8 {
namespace internal {
namespace maglev {
class Graph;
struct MaglevCompilationUnit;
class MaglevCodeGenerator : public AllStatic {
public:
static Handle<Code> Generate(MaglevCompilationUnit* compilation_unit,
Graph* graph);
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_CODE_GENERATOR_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compilation-data.h"
#include "src/compiler/js-heap-broker.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/objects/js-function-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
MaglevCompilationData::MaglevCompilationData(compiler::JSHeapBroker* broker)
: broker(broker),
isolate(broker->isolate()),
zone(broker->isolate()->allocator(), "maglev-zone") {}
MaglevCompilationData::~MaglevCompilationData() = default;
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationData* data,
Handle<JSFunction> function)
: compilation_data(data),
bytecode(
MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
feedback(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis(bytecode.object(), zone(), BytecodeOffset::None(),
true),
register_count_(bytecode.register_count()),
parameter_count_(bytecode.parameter_count()) {}
} // namespace maglev
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#define V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/heap-refs.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevGraphLabeller;
struct MaglevCompilationData {
explicit MaglevCompilationData(compiler::JSHeapBroker* broker);
~MaglevCompilationData();
std::unique_ptr<MaglevGraphLabeller> graph_labeller;
compiler::JSHeapBroker* const broker;
Isolate* const isolate;
Zone zone;
};
struct MaglevCompilationUnit {
MaglevCompilationUnit(MaglevCompilationData* data,
Handle<JSFunction> function);
compiler::JSHeapBroker* broker() const { return compilation_data->broker; }
Isolate* isolate() const { return compilation_data->isolate; }
Zone* zone() const { return &compilation_data->zone; }
int register_count() const { return register_count_; }
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const { return !!compilation_data->graph_labeller; }
MaglevGraphLabeller* graph_labeller() const {
DCHECK(has_graph_labeller());
return compilation_data->graph_labeller.get();
}
MaglevCompilationData* const compilation_data;
const compiler::BytecodeArrayRef bytecode;
const compiler::FeedbackVectorRef feedback;
compiler::BytecodeAnalysis const bytecode_analysis;
int register_count_;
int parameter_count_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILATION_DATA_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-compiler.h"
#include <iomanip>
#include <ostream>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/reglist.h"
#include "src/codegen/x64/register-x64.h"
#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/frames.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-code-generator.h"
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph-builder.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc.h"
#include "src/maglev/maglev-vreg-allocator.h"
#include "src/objects/code-inl.h"
#include "src/objects/js-function.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace maglev {
class NumberingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = false;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) { node_id_ = 1; }
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {
node->set_id(node_id_++);
}
private:
uint32_t node_id_;
};
class UseMarkingProcessor {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
void Process(NodeBase* node, const ProcessingState& state) {
if (node->properties().can_deopt()) MarkCheckpointNodes(node, state);
for (Input& input : *node) {
input.node()->mark_use(node->id(), &input);
}
}
void Process(Phi* node, const ProcessingState& state) {
// Don't mark Phi uses when visiting the node, because of loop phis.
// Instead, they'll be visited while processing Jump/JumpLoop.
}
// Specialize the two unconditional jumps to extend their Phis' inputs' live
// ranges.
void Process(JumpLoop* node, const ProcessingState& state) {
int i = state.block()->predecessor_id();
BasicBlock* target = node->target();
if (!target->has_phi()) return;
uint32_t use = node->id();
for (Phi* phi : *target->phis()) {
ValueNode* input = phi->input(i).node();
input->mark_use(use, &phi->input(i));
}
}
void Process(Jump* node, const ProcessingState& state) {
int i = state.block()->predecessor_id();
BasicBlock* target = node->target();
if (!target->has_phi()) return;
uint32_t use = node->id();
for (Phi* phi : *target->phis()) {
ValueNode* input = phi->input(i).node();
input->mark_use(use, &phi->input(i));
}
}
private:
void MarkCheckpointNodes(NodeBase* node, const ProcessingState& state) {
const InterpreterFrameState* checkpoint_state =
state.checkpoint_frame_state();
int use_id = node->id();
for (int i = 0; i < state.parameter_count(); i++) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
for (int i = 0; i < state.register_count(); i++) {
interpreter::Register reg = interpreter::Register(i);
ValueNode* node = checkpoint_state->get(reg);
if (node) node->mark_use(use_id, nullptr);
}
if (checkpoint_state->accumulator()) {
checkpoint_state->accumulator()->mark_use(use_id, nullptr);
}
}
};
MaglevCompiler::MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function)
: compilation_data_(broker),
toplevel_compilation_unit_(&compilation_data_, function) {}
Handle<Code> MaglevCompiler::Compile() {
// Build graph.
if (FLAG_print_maglev_code || FLAG_code_comments || FLAG_print_maglev_graph ||
FLAG_trace_maglev_regalloc) {
compilation_data_.graph_labeller.reset(new MaglevGraphLabeller());
}
MaglevGraphBuilder graph_builder(&toplevel_compilation_unit_);
graph_builder.Build();
if (FLAG_print_maglev_graph) {
std::cout << "After graph buiding" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
{
GraphMultiProcessor<NumberingProcessor, UseMarkingProcessor,
MaglevVregAllocator>
processor(&toplevel_compilation_unit_);
processor.ProcessGraph(graph_builder.graph());
}
if (FLAG_print_maglev_graph) {
std::cout << "After node processor" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
StraightForwardRegisterAllocator allocator(&toplevel_compilation_unit_,
graph_builder.graph());
if (FLAG_print_maglev_graph) {
std::cout << "After register allocation" << std::endl;
PrintGraph(std::cout, &toplevel_compilation_unit_, graph_builder.graph());
}
Handle<Code> code = MaglevCodeGenerator::Generate(&toplevel_compilation_unit_,
graph_builder.graph());
const bool deps_committed_successfully =
broker()->dependencies()->Commit(code);
CHECK(deps_committed_successfully);
if (FLAG_print_maglev_code) {
code->Print();
}
return code;
}
} // namespace maglev
} // namespace internal
} // namespace v8
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_COMPILER_H_
#define V8_MAGLEV_MAGLEV_COMPILER_H_
#include "src/common/globals.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/heap-refs.h"
#include "src/maglev/maglev-compilation-data.h"
namespace v8 {
namespace internal {
namespace compiler {
class JSHeapBroker;
}
namespace maglev {
class MaglevCompiler {
public:
explicit MaglevCompiler(compiler::JSHeapBroker* broker,
Handle<JSFunction> function);
Handle<Code> Compile();
compiler::JSHeapBroker* broker() const { return compilation_data_.broker; }
Zone* zone() { return &compilation_data_.zone; }
Isolate* isolate() { return compilation_data_.isolate; }
private:
MaglevCompilationData compilation_data_;
MaglevCompilationUnit toplevel_compilation_unit_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_COMPILER_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
#include <map>
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevGraphLabeller {
public:
void RegisterNode(const Node* node) {
if (node_ids_.emplace(node, next_node_id_).second) {
next_node_id_++;
}
}
void RegisterBasicBlock(const BasicBlock* block) {
block_ids_[block] = next_block_id_++;
if (node_ids_.emplace(block->control_node(), next_node_id_).second) {
next_node_id_++;
}
}
int BlockId(const BasicBlock* block) { return block_ids_[block]; }
int NodeId(const NodeBase* node) { return node_ids_[node]; }
int max_node_id() const { return next_node_id_ - 1; }
int max_node_id_width() const { return std::ceil(std::log10(max_node_id())); }
void PrintNodeLabel(std::ostream& os, const Node* node) {
auto node_id_it = node_ids_.find(node);
if (node_id_it == node_ids_.end()) {
os << "<invalid node " << node << ">";
return;
}
os << "n" << node_id_it->second;
}
void PrintInput(std::ostream& os, const Input& input) {
PrintNodeLabel(os, input.node());
os << ":" << input.operand();
}
private:
std::map<const BasicBlock*, int> block_ids_;
std::map<const NodeBase*, int> node_ids_;
int next_block_id_ = 1;
int next_node_id_ = 1;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_LABELLER_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
#define V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
#include <ostream>
#include <set>
#include <vector>
namespace v8 {
namespace internal {
namespace maglev {
class BasicBlock;
class ControlNode;
class Graph;
struct MaglevCompilationUnit;
class MaglevGraphLabeller;
class NodeBase;
class Node;
class Phi;
class ProcessingState;
class MaglevPrintingVisitor {
public:
// Could be interesting to print checkpoints too.
static constexpr bool kNeedsCheckpointStates = false;
explicit MaglevPrintingVisitor(std::ostream& os);
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph);
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block);
void Process(Phi* phi, const ProcessingState& state);
void Process(Node* node, const ProcessingState& state);
void Process(ControlNode* node, const ProcessingState& state);
std::ostream& os() { return *os_for_additional_info_; }
private:
std::ostream& os_;
std::unique_ptr<std::ostream> os_for_additional_info_;
std::set<BasicBlock*> loop_headers;
std::vector<BasicBlock*> targets;
};
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
Graph* const graph);
class PrintNode {
public:
PrintNode(MaglevGraphLabeller* graph_labeller, const NodeBase* node)
: graph_labeller_(graph_labeller), node_(node) {}
void Print(std::ostream& os) const;
private:
MaglevGraphLabeller* graph_labeller_;
const NodeBase* node_;
};
std::ostream& operator<<(std::ostream& os, const PrintNode& printer);
class PrintNodeLabel {
public:
PrintNodeLabel(MaglevGraphLabeller* graph_labeller, const Node* node)
: graph_labeller_(graph_labeller), node_(node) {}
void Print(std::ostream& os) const;
private:
MaglevGraphLabeller* graph_labeller_;
const Node* node_;
};
std::ostream& operator<<(std::ostream& os, const PrintNodeLabel& printer);
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_PRINTER_H_
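Both printers are ostream helpers, so graph dumps can interleave labels and full node bodies; a usage sketch, with labeller and node assumed in scope:

std::cout << "node " << PrintNodeLabel(labeller, node) << ": "
          << PrintNode(labeller, node) << std::endl;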
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_GRAPH_H_
#define V8_MAGLEV_MAGLEV_GRAPH_H_
#include <vector>
#include "src/maglev/maglev-basic-block.h"
namespace v8 {
namespace internal {
namespace maglev {
using BlockConstIterator = std::vector<BasicBlock*>::const_iterator;
using BlockConstReverseIterator =
std::vector<BasicBlock*>::const_reverse_iterator;
class Graph {
public:
explicit Graph(Zone* zone) : blocks_(zone) {}
BasicBlock* operator[](int i) { return blocks_[i]; }
const BasicBlock* operator[](int i) const { return blocks_[i]; }
int num_blocks() const { return static_cast<int>(blocks_.size()); }
BlockConstIterator begin() const { return blocks_.begin(); }
BlockConstIterator end() const { return blocks_.end(); }
BlockConstReverseIterator rbegin() const { return blocks_.rbegin(); }
BlockConstReverseIterator rend() const { return blocks_.rend(); }
BasicBlock* last_block() const { return blocks_.back(); }
void Add(BasicBlock* block) { blocks_.push_back(block); }
uint32_t stack_slots() const { return stack_slots_; }
void set_stack_slots(uint32_t stack_slots) {
DCHECK_EQ(kMaxUInt32, stack_slots_);
DCHECK_NE(kMaxUInt32, stack_slots);
stack_slots_ = stack_slots;
}
private:
uint32_t stack_slots_ = kMaxUInt32;
ZoneVector<BasicBlock*> blocks_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_GRAPH_H_
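Graph exposes STL-style forward and reverse iteration over its blocks in emission order; a sketch:

for (BasicBlock* block : *graph) {
  // Forward, in emission order; use rbegin()/rend() to walk backwards.
  USE(block);
}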
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
#define V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
#include "src/codegen/x64/register-x64.h"
#include "src/compiler/backend/instruction.h"
#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
namespace maglev {
struct LiveNodeInfo;
#define COUNT(V) +1
static constexpr int kAllocatableGeneralRegisterCount =
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(COUNT);
#undef COUNT
constexpr uint8_t MapRegisterToIndex(Register r) {
uint8_t count = 0;
#define EMIT_BRANCH(V) \
if (r == V) return count; \
count++;
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(EMIT_BRANCH)
#undef EMIT_BRANCH
UNREACHABLE();
}
constexpr Register MapIndexToRegister(int i) {
uint8_t count = 0;
#define EMIT_BRANCH(V) \
if (i == count) return V; \
count++;
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(EMIT_BRANCH)
#undef EMIT_BRANCH
UNREACHABLE();
}
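Both functions expand ALWAYS_ALLOCATABLE_GENERAL_REGISTERS in the same order, so they are inverses over the allocatable set. Since both are constexpr, the invariant can be sketched as a compile-time check:

static_assert(MapRegisterToIndex(MapIndexToRegister(0)) == 0,
              "index -> register -> index round-trips");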
struct RegisterStateFlags {
// TODO(v8:7700): Use the good old Flags mechanism.
static constexpr int kIsMergeShift = 0;
static constexpr int kIsInitializedShift = 1;
const bool is_initialized = false;
const bool is_merge = false;
explicit constexpr operator uintptr_t() const {
return (is_initialized ? 1 << kIsInitializedShift : 0) |
(is_merge ? 1 << kIsMergeShift : 0);
}
constexpr explicit RegisterStateFlags(uintptr_t state)
: is_initialized((state & (1 << kIsInitializedShift)) != 0),
is_merge((state & (1 << kIsMergeShift)) != 0) {}
constexpr RegisterStateFlags(bool is_initialized, bool is_merge)
: is_initialized(is_initialized), is_merge(is_merge) {}
};
constexpr bool operator==(const RegisterStateFlags& left,
const RegisterStateFlags& right) {
return left.is_initialized == right.is_initialized &&
left.is_merge == right.is_merge;
}
typedef PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
struct RegisterMerge {
compiler::AllocatedOperand* operands() {
return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
}
compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
LiveNodeInfo* node;
};
inline bool LoadMergeState(RegisterState state, RegisterMerge** merge) {
DCHECK(state.GetPayload().is_initialized);
if (state.GetPayload().is_merge) {
*merge = static_cast<RegisterMerge*>(state.GetPointer());
return true;
}
*merge = nullptr;
return false;
}
inline bool LoadMergeState(RegisterState state, LiveNodeInfo** node,
RegisterMerge** merge) {
DCHECK(state.GetPayload().is_initialized);
if (LoadMergeState(state, merge)) {
*node = (*merge)->node;
return true;
}
*node = static_cast<LiveNodeInfo*>(state.GetPointer());
return false;
}
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_REGALLOC_DATA_H_
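RegisterState packs the two RegisterStateFlags bits into the low bits of the stored pointer via PointerWithPayload; LoadMergeState then branches on is_merge to decide whether the pointer is a LiveNodeInfo or a RegisterMerge. A sketch, assuming PointerWithPayload's (pointer, payload) constructor (Example is hypothetical):

void Example(LiveNodeInfo* info) {
  RegisterState state(info, RegisterStateFlags(/*is_initialized=*/true,
                                               /*is_merge=*/false));
  LiveNodeInfo* node;
  RegisterMerge* merge;
  if (!LoadMergeState(state, &node, &merge)) {
    // Single live value: node == info and merge == nullptr.
  }
}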
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_REGALLOC_H_
#define V8_MAGLEV_MAGLEV_REGALLOC_H_
#include "src/maglev/maglev-compilation-data.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class MaglevPrintingVisitor;
struct StackSlot {
StackSlot(MachineRepresentation representation, int index)
: slot(compiler::LocationOperand::STACK_SLOT, representation, index) {}
compiler::AllocatedOperand slot;
StackSlot* next_ = nullptr;
StackSlot** next() { return &next_; }
};
struct LiveNodeInfo {
ValueNode* node;
uint32_t last_use = 0;
uint32_t next_use = 0;
StackSlot* stack_slot = nullptr;
Register reg = Register::no_reg();
compiler::AllocatedOperand allocation() const {
if (reg.is_valid()) {
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
MachineRepresentation::kTagged,
reg.code());
}
DCHECK_NOT_NULL(stack_slot);
return stack_slot->slot;
}
};
class StraightForwardRegisterAllocator {
public:
StraightForwardRegisterAllocator(MaglevCompilationUnit* compilation_unit,
Graph* graph);
~StraightForwardRegisterAllocator();
int stack_slots() const { return top_of_stack_; }
private:
std::vector<int> future_register_uses_[kAllocatableGeneralRegisterCount];
// Currently live values.
std::map<ValueNode*, LiveNodeInfo> values_;
base::ThreadedList<StackSlot> free_slots_;
int top_of_stack_ = 0;
#define N(V) nullptr,
LiveNodeInfo* register_values_[kAllocatableGeneralRegisterCount] = {
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(N)};
#undef N
LiveNodeInfo* MakeLive(ValueNode* node) {
uint32_t last_use = node->live_range().end;
// TODO(verwaest): We don't currently have next_use info...
uint32_t next_use = node->next_use();
return &(values_[node] = {node, last_use, next_use});
}
void ComputePostDominatingHoles(Graph* graph);
void AllocateRegisters(Graph* graph);
void PrintLiveRegs() const;
// Update use info and clear now dead registers.
void UpdateInputUseAndClearDead(uint32_t use, const Input& input);
void AllocateControlNode(ControlNode* node, BasicBlock* block);
void AllocateNode(Node* node);
void AllocateNodeResult(ValueNode* node);
void AssignInput(Input& input);
void AssignTemporaries(NodeBase* node);
void TryAllocateToInput(LiveNodeInfo* info, Phi* phi);
RegList GetFreeRegisters(int count);
void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
compiler::AllocatedOperand target);
void AllocateSpillSlot(LiveNodeInfo* info);
void Spill(LiveNodeInfo* info);
void SpillAndClearRegisters();
void SpillRegisters();
compiler::AllocatedOperand AllocateRegister(LiveNodeInfo* info);
compiler::AllocatedOperand ForceAllocate(const Register& reg,
LiveNodeInfo* info,
bool try_move = true);
compiler::AllocatedOperand DoAllocate(const Register& reg,
LiveNodeInfo* info);
void SetRegister(Register reg, LiveNodeInfo* info);
void Free(const Register& reg, bool try_move);
compiler::InstructionOperand TryAllocateRegister(LiveNodeInfo* info);
void InitializeRegisterValues(RegisterState* target_state);
void EnsureInRegister(RegisterState* target_state, LiveNodeInfo* incoming);
void InitializeBranchTargetRegisterValues(ControlNode* source,
BasicBlock* target);
void InitializeConditionalBranchRegisters(ConditionalControlNode* source,
BasicBlock* target);
void MergeRegisterValues(ControlNode* control, BasicBlock* target,
int predecessor_id);
MaglevGraphLabeller* graph_labeller() const {
return compilation_unit_->graph_labeller();
}
MaglevCompilationUnit* compilation_unit_;
std::unique_ptr<MaglevPrintingVisitor> printing_visitor_;
BlockConstIterator block_it_;
NodeIterator node_it_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_REGALLOC_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
#define V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_
#include "src/maglev/maglev-basic-block.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
namespace v8 {
namespace internal {
namespace maglev {
class ProcessingState;
class MaglevVregAllocationState {
public:
int AllocateVirtualRegister() { return next_virtual_register_++; }
int num_allocated_registers() const { return next_virtual_register_; }
private:
int next_virtual_register_ = 0;
};
class MaglevVregAllocator {
public:
static constexpr bool kNeedsCheckpointStates = true;
void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
for (BasicBlock* block : *graph) {
if (!block->has_phi()) continue;
for (Phi* phi : *block->phis()) {
phi->AllocateVregInPostProcess(&state_);
}
}
}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {}
#define DEF_PROCESS_NODE(NAME) \
void Process(NAME* node, const ProcessingState& state) { \
node->AllocateVreg(&state_, state); \
}
NODE_BASE_LIST(DEF_PROCESS_NODE)
#undef DEF_PROCESS_NODE
private:
MaglevVregAllocationState state_;
};
} // namespace maglev
} // namespace internal
} // namespace v8
#endif // V8_MAGLEV_MAGLEV_VREG_ALLOCATOR_H_