Commit 30efa315 authored by Toon Verwaest, committed by V8 LUCI CQ

[maglev] Don't spill constants but load them on-demand

This avoids unnecessary spill moves and reduces register pressure.

Bug: v8:7700
Change-Id: I3f2c35f2b6c0a3e64408b40d59696d924af8a9b4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3647365
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80527}
parent 59518b08
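Before the diff, here is a minimal standalone sketch of the idea (hypothetical, simplified types such as Location, SpillSlot, ConstantValue and MacroAssemblerStub; not the real Maglev classes): rather than giving a constant a spill slot and copying it through the stack frame, the gap-move resolver re-emits the constant directly into the target register whenever it is needed.

// Sketch only: "load constants on demand" with assumed, simplified types.
#include <cstdint>
#include <variant>

struct Register { int code; };

// Stand-in for the real macro assembler.
struct MacroAssemblerStub {
  void MoveImmediate(Register dst, int32_t imm) { /* emit: mov dst, imm */ }
  void LoadStackSlot(Register dst, int slot) { /* emit: mov dst, [fp + slot] */ }
};

// A value is either held in a spill slot or is a constant we can recreate.
struct SpillSlot { int index; };
struct ConstantValue { int32_t value; };
using Location = std::variant<SpillSlot, ConstantValue>;

// Gap-move resolution: a reg<-memory move loads from the frame, but a
// reg<-constant "move" simply re-emits the immediate, so the constant never
// needed a spill slot (or a spill move) in the first place.
void EmitReload(MacroAssemblerStub& masm, Register target, const Location& loc) {
  if (const ConstantValue* c = std::get_if<ConstantValue>(&loc)) {
    masm.MoveImmediate(target, c->value);  // rematerialize, no stack traffic
  } else {
    masm.LoadStackSlot(target, std::get<SpillSlot>(loc).index);
  }
}

int main() {
  MacroAssemblerStub masm;
  EmitReload(masm, Register{0}, ConstantValue{42});  // loaded on demand
  EmitReload(masm, Register{1}, SpillSlot{3});       // genuine reload
  return 0;
}

The diff below applies the same idea to the real code: constant nodes are defined as constant operands instead of register results, and LoadToRegister()/Reify() rematerialize them at use sites, in gap moves, and in deopt frames.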
@@ -8,6 +8,7 @@
 #include "src/codegen/code-desc.h"
 #include "src/codegen/register.h"
 #include "src/codegen/safepoint-table.h"
+#include "src/codegen/x64/register-x64.h"
 #include "src/deoptimizer/translation-array.h"
 #include "src/execution/frame-constants.h"
 #include "src/interpreter/bytecode-register.h"
@@ -41,8 +42,7 @@ std::array<T, N> repeat(T value) {
 }
 
 using RegisterMoves = std::array<Register, Register::kNumRegisters>;
-using StackToRegisterMoves =
-    std::array<compiler::InstructionOperand, Register::kNumRegisters>;
+using RegisterReloads = std::array<ValueNode*, Register::kNumRegisters>;
 
 class MaglevCodeGeneratingNodeProcessor {
  public:
@@ -189,22 +189,15 @@ class MaglevCodeGeneratingNodeProcessor {
     RecursivelyEmitParallelMoveChain(source, source, target, moves);
   }
 
-  void EmitStackToRegisterGapMove(compiler::InstructionOperand source,
-                                  Register target) {
-    if (!source.IsAllocated()) return;
-    __ movq(target, code_gen_state_->GetStackSlot(
-                        compiler::AllocatedOperand::cast(source)));
+  void EmitRegisterReload(ValueNode* node, Register target) {
+    if (node == nullptr) return;
+    node->LoadToRegister(code_gen_state_, target);
   }
 
-  void RecordGapMove(compiler::AllocatedOperand source, Register target_reg,
-                     RegisterMoves& register_moves,
-                     StackToRegisterMoves& stack_to_register_moves) {
-    if (source.IsStackSlot()) {
-      // For stack->reg moves, don't emit the move yet, but instead record the
-      // move in the set of stack-to-register moves, to be executed after the
-      // reg->reg parallel moves.
-      stack_to_register_moves[target_reg.code()] = source;
-    } else {
+  void RecordGapMove(ValueNode* node, compiler::InstructionOperand source,
+                     Register target_reg, RegisterMoves& register_moves,
+                     RegisterReloads& register_reloads) {
+    if (source.IsAnyRegister()) {
       // For reg->reg moves, don't emit the move yet, but instead record the
      // move in the set of parallel register moves, to be resolved later.
       Register source_reg = ToRegister(source);
@@ -212,26 +205,31 @@ class MaglevCodeGeneratingNodeProcessor {
       DCHECK(!register_moves[source_reg.code()].is_valid());
       register_moves[source_reg.code()] = target_reg;
     }
+    } else {
+      // For register loads from memory, don't emit the move yet, but instead
+      // record the move in the set of register reloads, to be executed after
+      // the reg->reg parallel moves.
+      register_reloads[target_reg.code()] = node;
+    }
   }
 
-  void RecordGapMove(compiler::AllocatedOperand source,
+  void RecordGapMove(ValueNode* node, compiler::InstructionOperand source,
                      compiler::AllocatedOperand target,
                      RegisterMoves& register_moves,
-                     StackToRegisterMoves& stack_to_register_moves) {
+                     RegisterReloads& stack_to_register_moves) {
     if (target.IsRegister()) {
-      RecordGapMove(source, ToRegister(target), register_moves,
+      RecordGapMove(node, source, ToRegister(target), register_moves,
                     stack_to_register_moves);
       return;
     }
-    // stack->stack and reg->stack moves should be executed before registers are
-    // clobbered by reg->reg or stack->reg, so emit them immediately.
+    // memory->stack and reg->stack moves should be executed before registers
+    // are clobbered by reg->reg or memory->reg, so emit them immediately.
     if (source.IsRegister()) {
       Register source_reg = ToRegister(source);
       __ movq(code_gen_state_->GetStackSlot(target), source_reg);
     } else {
-      __ movq(kScratchRegister, code_gen_state_->GetStackSlot(source));
+      EmitRegisterReload(node, kScratchRegister);
       __ movq(code_gen_state_->GetStackSlot(target), kScratchRegister);
     }
   }
@@ -253,34 +251,38 @@ class MaglevCodeGeneratingNodeProcessor {
     RegisterMoves register_moves =
         repeat<Register::kNumRegisters>(Register::no_reg());
 
-    // Save stack to register moves in an array, so that we can execute them
-    // after the parallel moves have read the register values. Note that the
-    // mapping is:
+    // Save registers restored from a memory location in an array, so that we
+    // can execute them after the parallel moves have read the register values.
+    // Note that the mapping is:
     //
-    //     stack_to_register_moves[target] = source.
-    StackToRegisterMoves stack_to_register_moves;
+    //     register_reloads[target] = node.
+    ValueNode* n = nullptr;
+    RegisterReloads register_reloads = repeat<Register::kNumRegisters>(n);
 
     __ RecordComment("-- Gap moves:");
 
     target->state()->register_state().ForEachGeneralRegister(
         [&](Register reg, RegisterState& state) {
+          ValueNode* node;
           RegisterMerge* merge;
-          if (LoadMergeState(state, &merge)) {
-            compiler::AllocatedOperand source = merge->operand(predecessor_id);
+          if (LoadMergeState(state, &node, &merge)) {
+            compiler::InstructionOperand source =
+                merge->operand(predecessor_id);
            if (FLAG_code_comments) {
              std::stringstream ss;
              ss << "-- * " << source << " → " << reg;
              __ RecordComment(ss.str());
            }
-            RecordGapMove(source, reg, register_moves, stack_to_register_moves);
+            RecordGapMove(node, source, reg, register_moves, register_reloads);
          }
        });
 
     if (target->has_phi()) {
       Phi::List* phis = target->phis();
       for (Phi* phi : *phis) {
-        compiler::AllocatedOperand source = compiler::AllocatedOperand::cast(
-            phi->input(state.block()->predecessor_id()).operand());
+        Input& input = phi->input(state.block()->predecessor_id());
+        ValueNode* node = input.node();
+        compiler::InstructionOperand source = input.operand();
         compiler::AllocatedOperand target =
             compiler::AllocatedOperand::cast(phi->result().operand());
         if (FLAG_code_comments) {
@@ -289,7 +291,7 @@ class MaglevCodeGeneratingNodeProcessor {
              << graph_labeller()->NodeId(phi) << ")";
          __ RecordComment(ss.str());
        }
-        RecordGapMove(source, target, register_moves, stack_to_register_moves);
+        RecordGapMove(node, source, target, register_moves, register_reloads);
      }
    }
@@ -298,7 +300,7 @@ class MaglevCodeGeneratingNodeProcessor {
 #undef EMIT_MOVE_FOR_REG
 
 #define EMIT_MOVE_FOR_REG(Name) \
-  EmitStackToRegisterGapMove(stack_to_register_moves[Name.code()], Name);
+  EmitRegisterReload(register_reloads[Name.code()], Name);
     ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
 #undef EMIT_MOVE_FOR_REG
   }
@@ -520,13 +522,18 @@ class MaglevCodeGeneratorImpl final {
   void EmitDeoptFrameSingleValue(ValueNode* value,
                                  const InputLocation& input_location) {
-    const compiler::AllocatedOperand& operand =
-        compiler::AllocatedOperand::cast(input_location.operand());
-    ValueRepresentation repr = value->properties().value_representation();
-    if (operand.IsRegister()) {
-      EmitDeoptStoreRegister(operand, repr);
+    if (input_location.operand().IsConstant()) {
+      translation_array_builder_.StoreLiteral(
+          GetDeoptLiteral(*value->Reify(isolate())));
     } else {
-      EmitDeoptStoreStackSlot(operand, repr);
+      const compiler::AllocatedOperand& operand =
+          compiler::AllocatedOperand::cast(input_location.operand());
+      ValueRepresentation repr = value->properties().value_representation();
+      if (operand.IsRegister()) {
+        EmitDeoptStoreRegister(operand, repr);
+      } else {
+        EmitDeoptStoreStackSlot(operand, repr);
+      }
     }
   }
...
@@ -61,6 +61,10 @@ void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
                               compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
                               vreg_state->AllocateVirtualRegister());
 }
+void DefineAsConstant(MaglevVregAllocationState* vreg_state, Node* node) {
+  node->result().SetUnallocated(compiler::UnallocatedOperand::NONE,
+                                vreg_state->AllocateVirtualRegister());
+}
 void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
                    Register reg) {
@@ -98,16 +102,21 @@ void UseFixed(Input& input, DoubleRegister reg) {
 // ---
 
 void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
-  // TODO(leszeks): Consider special casing the value. (Toon: could possibly
-  // be done through Input directly?)
-  const compiler::AllocatedOperand& operand =
-      compiler::AllocatedOperand::cast(input.operand());
-  if (operand.IsRegister()) {
-    __ Push(operand.GetRegister());
+  if (input.operand().IsConstant()) {
+    input.node()->LoadToRegister(code_gen_state, kScratchRegister);
+    __ Push(kScratchRegister);
   } else {
-    DCHECK(operand.IsStackSlot());
-    __ Push(code_gen_state->GetStackSlot(operand));
+    // TODO(leszeks): Consider special casing the value. (Toon: could possibly
+    // be done through Input directly?)
+    const compiler::AllocatedOperand& operand =
+        compiler::AllocatedOperand::cast(input.operand());
+    if (operand.IsRegister()) {
+      __ Push(operand.GetRegister());
+    } else {
+      DCHECK(operand.IsStackSlot());
+      __ Push(code_gen_state->GetStackSlot(operand));
+    }
   }
 }
@@ -390,13 +399,79 @@ DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
 // ---
 // Nodes
 // ---
 
+void ValueNode::LoadToRegister(MaglevCodeGenState* code_gen_state,
+                               compiler::AllocatedOperand op) {
+  switch (opcode()) {
+#define V(Name)         \
+  case Opcode::k##Name: \
+    return this->Cast<Name>()->DoLoadToRegister(code_gen_state, op);
+    VALUE_NODE_LIST(V)
+#undef V
+    default:
+      UNREACHABLE();
+  }
+}
+
+void ValueNode::LoadToRegister(MaglevCodeGenState* code_gen_state,
+                               Register reg) {
+  switch (opcode()) {
+#define V(Name)         \
+  case Opcode::k##Name: \
+    return this->Cast<Name>()->DoLoadToRegister(code_gen_state, reg);
+    VALUE_NODE_LIST(V)
+#undef V
+    default:
+      UNREACHABLE();
+  }
+}
+
+void ValueNode::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                 Register reg) {
+  DCHECK(is_spilled());
+  __ movq(reg, code_gen_state->GetStackSlot(
+                   compiler::AllocatedOperand::cast(spill_slot())));
+}
+
+Handle<Object> ValueNode::Reify(Isolate* isolate) {
+  switch (opcode()) {
+#define V(Name)         \
+  case Opcode::k##Name: \
+    return this->Cast<Name>()->DoReify(isolate);
+    VALUE_NODE_LIST(V)
+#undef V
+    default:
+      UNREACHABLE();
+  }
+}
+
+void ValueNode::SetNoSpillOrHint() {
+  DCHECK_EQ(state_, kLastUse);
+#ifdef DEBUG
+  state_ = kSpillOrHint;
+#endif  // DEBUG
+  if (Is<Constant>() || Is<SmiConstant>() || Is<RootConstant>() ||
+      Is<Int32Constant>() || Is<Float64Constant>()) {
+    spill_or_hint_ = compiler::ConstantOperand(
+        compiler::UnallocatedOperand::cast(result().operand())
+            .virtual_register());
+  } else {
+    spill_or_hint_ = compiler::InstructionOperand();
+  }
+}
+
 void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                const ProcessingState& state) {
-  DefineAsRegister(vreg_state, this);
+  DefineAsConstant(vreg_state, this);
 }
 void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
-                               const ProcessingState& state) {
-  __ Move(ToRegister(result()), Immediate(value()));
+                               const ProcessingState& state) {}
+void SmiConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                   compiler::AllocatedOperand op) {
+  DoLoadToRegister(code_gen_state, op.GetRegister());
+}
+Handle<Object> SmiConstant::DoReify(Isolate* isolate) {
+  return handle(value_, isolate);
+}
+void SmiConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                   Register reg) {
+  __ Move(reg, Immediate(value()));
 }
 void SmiConstant::PrintParams(std::ostream& os,
                               MaglevGraphLabeller* graph_labeller) const {
@@ -405,11 +480,20 @@ void SmiConstant::PrintParams(std::ostream& os,
 void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                    const ProcessingState& state) {
-  DefineAsRegister(vreg_state, this);
+  DefineAsConstant(vreg_state, this);
 }
 void Float64Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
-                                   const ProcessingState& state) {
-  __ Move(ToDoubleRegister(result()), value());
+                                   const ProcessingState& state) {}
+void Float64Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                       compiler::AllocatedOperand op) {
+  DoLoadToRegister(code_gen_state, op.GetDoubleRegister());
+}
+Handle<Object> Float64Constant::DoReify(Isolate* isolate) {
+  return isolate->factory()->NewNumber(value_);
+}
+void Float64Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                       DoubleRegister reg) {
+  __ Move(reg, value());
 }
 void Float64Constant::PrintParams(std::ostream& os,
                                   MaglevGraphLabeller* graph_labeller) const {
@@ -418,12 +502,19 @@ void Float64Constant::PrintParams(std::ostream& os,
 void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                             const ProcessingState& state) {
-  DefineAsRegister(vreg_state, this);
+  DefineAsConstant(vreg_state, this);
 }
 void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
-                            const ProcessingState& state) {
-  __ Move(ToRegister(result()), object_.object());
+                            const ProcessingState& state) {}
+void Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                compiler::AllocatedOperand op) {
+  DoLoadToRegister(code_gen_state, op.GetRegister());
 }
+void Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                Register reg) {
+  __ Move(reg, object_.object());
+}
+Handle<Object> Constant::DoReify(Isolate* isolate) { return object_.object(); }
 void Constant::PrintParams(std::ostream& os,
                            MaglevGraphLabeller* graph_labeller) const {
   os << "(" << object_ << ")";
@@ -493,15 +584,21 @@ void RegisterInput::PrintParams(std::ostream& os,
 void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                 const ProcessingState& state) {
-  DefineAsRegister(vreg_state, this);
+  DefineAsConstant(vreg_state, this);
 }
 void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
-                                const ProcessingState& state) {
-  if (!has_valid_live_range()) return;
-  Register reg = ToRegister(result());
+                                const ProcessingState& state) {}
+void RootConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                    compiler::AllocatedOperand op) {
+  DoLoadToRegister(code_gen_state, op.GetRegister());
+}
+void RootConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                    Register reg) {
   __ LoadRoot(reg, index());
 }
+Handle<Object> RootConstant::DoReify(Isolate* isolate) {
+  return isolate->root_handle(index());
+}
 void RootConstant::PrintParams(std::ostream& os,
                                MaglevGraphLabeller* graph_labeller) const {
   os << "(" << RootsTable::name(index()) << ")";
@@ -742,7 +839,9 @@ void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
 }
 void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
                            const ProcessingState& state) {
-  if (source().IsRegister()) {
+  if (source().IsConstant()) {
+    node_->LoadToRegister(code_gen_state, target());
+  } else if (source().IsRegister()) {
     Register source_reg = ToRegister(source());
     if (target().IsAnyRegister()) {
       DCHECK(target().IsRegister());
@@ -875,11 +974,20 @@ void CheckedSmiTag::GenerateCode(MaglevCodeGenState* code_gen_state,
 void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
                                  const ProcessingState& state) {
-  DefineAsRegister(vreg_state, this);
+  DefineAsConstant(vreg_state, this);
 }
 void Int32Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
-                                 const ProcessingState& state) {
-  __ Move(ToRegister(result()), Immediate(value()));
+                                 const ProcessingState& state) {}
+void Int32Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                     compiler::AllocatedOperand op) {
+  DoLoadToRegister(code_gen_state, op.GetRegister());
+}
+void Int32Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
+                                     Register reg) {
+  __ Move(reg, Immediate(value()));
+}
+Handle<Object> Int32Constant::DoReify(Isolate* isolate) {
+  return isolate->factory()->NewNumber(value());
 }
 void Int32Constant::PrintParams(std::ostream& os,
                                 MaglevGraphLabeller* graph_labeller) const {
...
@@ -295,9 +295,8 @@ class ValueLocation {
   }
 
   // Only to be used on inputs that inherit allocation.
-  template <typename... Args>
-  void InjectAllocated(Args&&... args) {
-    operand_ = compiler::AllocatedOperand(args...);
+  void InjectLocation(compiler::InstructionOperand location) {
+    operand_ = location;
   }
 
   template <typename... Args>
@@ -703,27 +702,33 @@ class ValueNode : public Node {
     return spill_or_hint_;
   }
 
-  void SetNoSpillOrHint() {
-    DCHECK_EQ(state_, kLastUse);
-#ifdef DEBUG
-    state_ = kSpillOrHint;
-#endif  // DEBUG
-    spill_or_hint_ = compiler::InstructionOperand();
-  }
-
-  bool is_spilled() const {
+  bool is_loadable() const {
     DCHECK_EQ(state_, kSpillOrHint);
-    return spill_or_hint_.IsAnyStackSlot();
+    return spill_or_hint_.IsConstant() || spill_or_hint_.IsAnyStackSlot();
   }
 
+  bool is_spilled() const { return spill_or_hint_.IsAnyStackSlot(); }
+
+  void SetNoSpillOrHint();
+
+  /* For constants only. */
+  void LoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void LoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> Reify(Isolate* isolate);
+
   void Spill(compiler::AllocatedOperand operand) {
 #ifdef DEBUG
     if (state_ == kLastUse) {
       state_ = kSpillOrHint;
     } else {
-      DCHECK(!is_spilled());
+      DCHECK(!is_loadable());
     }
 #endif  // DEBUG
+    DCHECK(!Is<Constant>());
+    DCHECK(!Is<SmiConstant>());
+    DCHECK(!Is<RootConstant>());
+    DCHECK(!Is<Int32Constant>());
+    DCHECK(!Is<Float64Constant>());
     DCHECK(operand.IsAnyStackSlot());
     spill_or_hint_ = operand;
     DCHECK(spill_or_hint_.IsAnyStackSlot());
@@ -811,14 +816,14 @@
     return registers_with_result_ != kEmptyRegList;
   }
 
-  compiler::AllocatedOperand allocation() const {
+  compiler::InstructionOperand allocation() const {
     if (has_register()) {
       return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
                                         GetMachineRepresentation(),
                                         FirstRegisterCode());
     }
-    DCHECK(is_spilled());
-    return compiler::AllocatedOperand::cast(spill_or_hint_);
+    DCHECK(is_loadable());
+    return spill_or_hint_;
   }
 
  protected:
@@ -844,6 +849,12 @@
     return registers_with_result_.first().code();
   }
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand) {
+    UNREACHABLE();
+  }
+  void DoLoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> DoReify(Isolate* isolate) { UNREACHABLE(); }
+
   // Rename for better pairing with `end_id`.
   NodeIdT start_id() const { return id(); }
@@ -1056,6 +1067,8 @@ class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
   using Base = FixedInputValueNodeT<0, Int32Constant>;
 
  public:
+  using OutputRegister = Register;
+
   explicit Int32Constant(uint32_t bitfield, int32_t value)
       : Base(bitfield), value_(value) {}
@@ -1067,6 +1080,10 @@ class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void DoLoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> DoReify(Isolate* isolate);
+
  private:
   const int32_t value_;
 };
@@ -1075,6 +1092,8 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
   using Base = FixedInputValueNodeT<0, Float64Constant>;
 
  public:
+  using OutputRegister = DoubleRegister;
+
   explicit Float64Constant(uint32_t bitfield, double value)
       : Base(bitfield), value_(value) {}
@@ -1086,6 +1105,11 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void DoLoadToRegister(MaglevCodeGenState*, Register) { UNREACHABLE(); }
+  void DoLoadToRegister(MaglevCodeGenState*, DoubleRegister);
+  Handle<Object> DoReify(Isolate* isolate);
+
  private:
   const double value_;
 };
@@ -1223,6 +1247,10 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void DoLoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> DoReify(Isolate* isolate);
+
  private:
   const Smi value_;
 };
@@ -1238,6 +1266,10 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void DoLoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> DoReify(Isolate* isolate);
+
  private:
   const compiler::HeapObjectRef object_;
 };
@@ -1255,6 +1287,10 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
+  void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
+  void DoLoadToRegister(MaglevCodeGenState*, Register);
+  Handle<Object> DoReify(Isolate* isolate);
+
  private:
   const RootIndex index_;
 };
@@ -1519,19 +1555,22 @@ class GapMove : public FixedInputNodeT<0, GapMove> {
   using Base = FixedInputNodeT<0, GapMove>;
 
  public:
-  GapMove(uint32_t bitfield, compiler::AllocatedOperand source,
+  GapMove(uint32_t bitfield, ValueNode* node,
+          compiler::InstructionOperand source,
           compiler::AllocatedOperand target)
-      : Base(bitfield), source_(source), target_(target) {}
+      : Base(bitfield), node_(node), source_(source), target_(target) {}
 
-  compiler::AllocatedOperand source() const { return source_; }
+  compiler::InstructionOperand source() const { return source_; }
   compiler::AllocatedOperand target() const { return target_; }
+  ValueNode* node() const { return node_; }
 
   void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
   void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
   void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
 
  private:
-  compiler::AllocatedOperand source_;
+  ValueNode* node_;
+  compiler::InstructionOperand source_;
   compiler::AllocatedOperand target_;
 };
...
@@ -67,10 +67,10 @@ constexpr bool operator==(const RegisterStateFlags& left,
 typedef base::PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
 
 struct RegisterMerge {
-  compiler::AllocatedOperand* operands() {
-    return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
+  compiler::InstructionOperand* operands() {
+    return reinterpret_cast<compiler::InstructionOperand*>(this + 1);
   }
-  compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
+  compiler::InstructionOperand& operand(size_t i) { return operands()[i]; }
 
   ValueNode* node;
 };
...
@@ -386,7 +386,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
         // Skip over the result location.
         if (reg == deopt_info.result_location) return;
         InputLocation* input = &deopt_info.input_locations[index++];
-        input->InjectAllocated(node->allocation());
+        input->InjectLocation(node->allocation());
         UpdateUse(node, input);
       });
 }
@@ -402,7 +402,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
   checkpoint_state->ForEachValue(
       unit, [&](ValueNode* node, interpreter::Register reg) {
         InputLocation* input = &input_locations[index++];
-        input->InjectAllocated(node->allocation());
+        input->InjectLocation(node->allocation());
         UpdateUse(node, input);
       });
 }
@@ -495,10 +495,15 @@ void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
       break;
     }
+    case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
     case compiler::UnallocatedOperand::NONE:
+      DCHECK(node->Is<Constant>() || node->Is<RootConstant>() ||
+             node->Is<SmiConstant>() || node->Is<Int32Constant>() ||
+             node->Is<Float64Constant>());
+      break;
     case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
     case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
-    case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
       UNREACHABLE();
   }
@@ -527,8 +532,9 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
   // Remove the register from the node's list.
   node->RemoveRegister(reg);
 
-  // Return if the removed value already has another register or is spilled.
-  if (node->has_register() || node->is_spilled()) return;
+  // Return if the removed value already has another register or is loadable
+  // from memory.
+  if (node->has_register() || node->is_loadable()) return;
 
   // If we are at the end of the current node, and the last use of the given
   // node is the current node, allow it to be dropped.
@@ -551,7 +557,7 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
           << "gap move: " << PrintNodeLabel(graph_labeller(), node) << ": "
          << target << " ← " << source << std::endl;
    }
-    AddMoveBeforeCurrentNode(source, target);
+    AddMoveBeforeCurrentNode(node, source, target);
     return;
   }
@@ -607,7 +613,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     Phi::List* phis = target->phis();
     for (Phi* phi : *phis) {
       Input& input = phi->input(block->predecessor_id());
-      input.InjectAllocated(input.node()->allocation());
+      input.InjectLocation(input.node()->allocation());
     }
     for (Phi* phi : *phis) UpdateUse(&phi->input(block->predecessor_id()));
   }
@@ -661,9 +667,10 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
 }
 
 void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
-    compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
+    ValueNode* node, compiler::InstructionOperand source,
+    compiler::AllocatedOperand target) {
   GapMove* gap_move =
-      Node::New<GapMove>(compilation_info_->zone(), {}, source, target);
+      Node::New<GapMove>(compilation_info_->zone(), {}, node, source, target);
   if (compilation_info_->has_graph_labeller()) {
     graph_labeller()->RegisterNode(gap_move);
   }
@@ -678,7 +685,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
 }
 
 void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
-  if (node->is_spilled()) return;
+  if (node->is_loadable()) return;
   AllocateSpillSlot(node);
   if (FLAG_trace_maglev_regalloc) {
     printing_visitor_->os()
@@ -691,13 +698,12 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
   compiler::UnallocatedOperand operand =
       compiler::UnallocatedOperand::cast(input.operand());
   ValueNode* node = input.node();
-  compiler::AllocatedOperand location = node->allocation();
+  compiler::InstructionOperand location = node->allocation();
 
   switch (operand.extended_policy()) {
-    case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
     case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
-      input.SetAllocated(location);
-      break;
+      input.InjectLocation(location);
+      return;
 
     case compiler::UnallocatedOperand::FIXED_REGISTER: {
       Register reg = Register::from_code(operand.fixed_register_index());
@@ -707,7 +713,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
 
     case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
       if (location.IsAnyRegister()) {
-        input.SetAllocated(location);
+        input.SetAllocated(compiler::AllocatedOperand::cast(location));
       } else {
         input.SetAllocated(AllocateRegister(node, AllocationStage::kAtStart));
       }
@@ -720,6 +726,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
       break;
     }
 
+    case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
     case compiler::UnallocatedOperand::SAME_AS_INPUT:
     case compiler::UnallocatedOperand::NONE:
     case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
@@ -733,7 +740,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
     printing_visitor_->os()
        << "gap move: " << allocated << " ← " << location << std::endl;
   }
-  AddMoveBeforeCurrentNode(location, allocated);
+  AddMoveBeforeCurrentNode(node, location, allocated);
  }
 }
@@ -761,7 +768,7 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
 }
 
 void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
-  DCHECK(!node->is_spilled());
+  DCHECK(!node->is_loadable());
   uint32_t free_slot;
   bool is_tagged = (node->properties().value_representation() ==
                     ValueRepresentation::kTagged);
@@ -1062,14 +1069,14 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
     // If there's a value in the incoming state, that value is either
     // already spilled or in another place in the merge state.
-    if (incoming != nullptr && incoming->is_spilled()) {
+    if (incoming != nullptr && incoming->is_loadable()) {
       EnsureInRegister(target_state, incoming);
     }
     return;
   }
 
   DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
-  if (node == nullptr && !incoming->is_spilled()) {
+  if (node == nullptr && !incoming->is_loadable()) {
     // If the register is unallocated at the merge point, and the incoming
     // value isn't spilled, that means we must have seen it already in a
    // different register.
...
@@ -133,7 +133,8 @@ class StraightForwardRegisterAllocator {
   void AssignTemporaries(NodeBase* node);
   void TryAllocateToInput(Phi* phi);
 
-  void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
+  void AddMoveBeforeCurrentNode(ValueNode* node,
+                                compiler::InstructionOperand source,
                                 compiler::AllocatedOperand target);
 
   void AllocateSpillSlot(ValueNode* node);
...