Commit 86c753c3 authored by Victor Gomes, committed by V8 LUCI CQ

[maglev] Support SwitchOnSmi

Bug: v8:7700
Change-Id: I0bf8f8e216bd4d73dea4dde0e80b0d7157ccab6a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3811282
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82234}
parent 8b63cc9b
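
For orientation: the new Maglev Switch node dispatches on an untagged Smi over a dense range of case values and falls through when the value is out of range. A minimal standalone sketch of that semantics (hypothetical helper, not V8 code; the function name and parameters are illustrative):

// Hypothetical sketch, not V8 code: the dispatch semantics the Switch node
// implements. Returns the jump-table index to dispatch to, or -1 for the
// fallthrough block.
int SwitchTargetIndex(int case_value, int case_value_base, int num_targets) {
  int index = case_value - case_value_base;
  // One unsigned comparison covers both "below the base" and "past the last
  // case", matching the cmpq + j(above_equal) pair emitted in the assembler.
  if (static_cast<unsigned>(index) >= static_cast<unsigned>(num_targets)) {
    return -1;  // out of range: fall through
  }
  return index;  // in range: jump to targets[index]
}
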
@@ -593,22 +593,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope scope(this);
Register table = scope.AcquireScratch();
Label fallthrough, jump_table;
if (case_value_base != 0) {
__ subq(reg, Immediate(case_value_base));
}
__ cmpq(reg, Immediate(num_labels));
__ j(above_equal, &fallthrough);
__ leaq(table, MemOperand(&jump_table));
__ jmp(MemOperand(table, reg, times_8, 0));
// Emit the jump table inline, under the assumption that it's not too big.
__ Align(kSystemPointerSize);
__ bind(&jump_table);
for (int i = 0; i < num_labels; ++i) {
__ dq(labels[i]);
}
__ bind(&fallthrough);
__ Switch(scope.AcquireScratch(), reg, case_value_base, labels, num_labels);
}
#undef __
......
@@ -1512,6 +1512,26 @@ SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
}
}
void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base,
Label** labels, int num_labels) {
Register table = scratch;
Label fallthrough, jump_table;
if (case_value_base != 0) {
subq(reg, Immediate(case_value_base));
}
cmpq(reg, Immediate(num_labels));
j(above_equal, &fallthrough);
leaq(table, MemOperand(&jump_table));
jmp(MemOperand(table, reg, times_8, 0));
// Emit the jump table inline, under the assumption that it's not too big.
Align(kSystemPointerSize);
bind(&jump_table);
for (int i = 0; i < num_labels; ++i) {
dq(labels[i]);
}
bind(&fallthrough);
}
void TurboAssembler::Push(Smi source) {
intptr_t smi = static_cast<intptr_t>(source.ptr());
if (is_int32(smi)) {
......
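
TurboAssembler::Switch emits the table of code addresses inline, right after the dispatch sequence: leaq materializes the table's start address and the scaled jmp loads the 8-byte entry for the already range-checked case value. A rough standalone model of that dispatch using a function-pointer table (hypothetical names, not V8 code):

#include <cstdio>

// Hypothetical sketch, not V8 code: an inline jump table modeled with
// function pointers. Each kTable entry plays the role of one dq(labels[i])
// slot; the indexed call mirrors jmp(MemOperand(table, reg, times_8, 0)).
namespace {
void OnCase0() { std::puts("case 0"); }
void OnCase1() { std::puts("case 1"); }
void OnFallthrough() { std::puts("fallthrough"); }

void Dispatch(int value, int case_value_base) {
  using Handler = void (*)();
  static constexpr Handler kTable[] = {OnCase0, OnCase1};
  constexpr unsigned kNumLabels = sizeof(kTable) / sizeof(kTable[0]);
  unsigned index = static_cast<unsigned>(value - case_value_base);
  if (index >= kNumLabels) {
    OnFallthrough();
    return;
  }
  kTable[index]();  // scaled, table-relative load of the target address
}
}  // namespace

int main() {
  Dispatch(5, 4);  // prints "case 1"
  Dispatch(9, 4);  // prints "fallthrough"
}
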
@@ -280,6 +280,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
j(less, dest);
}
void Switch(Register scratch, Register reg, int case_base_value,
Label** labels, int num_labels);
#ifdef V8_MAP_PACKING
void UnpackMapWord(Register r);
#endif
......
@@ -2416,7 +2416,28 @@ void MaglevGraphBuilder::VisitJumpIfJSReceiver() {
MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
}
MAGLEV_UNIMPLEMENTED_BYTECODE(SwitchOnSmiNoFeedback)
void MaglevGraphBuilder::VisitSwitchOnSmiNoFeedback() {
// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
interpreter::JumpTableTargetOffsets offsets =
iterator_.GetJumpTableTargetOffsets();
if (offsets.size() == 0) return;
int case_value_base = (*offsets.begin()).case_value;
BasicBlockRef* targets = zone()->NewArray<BasicBlockRef>(offsets.size());
for (interpreter::JumpTableTargetOffset offset : offsets) {
BasicBlockRef* ref = &targets[offset.case_value - case_value_base];
new (ref) BasicBlockRef(&jump_targets_[offset.target_offset]);
}
ValueNode* case_value = GetAccumulatorInt32();
BasicBlock* block =
FinishBlock<Switch>(next_offset(), {case_value}, case_value_base, targets,
offsets.size(), &jump_targets_[next_offset()]);
for (interpreter::JumpTableTargetOffset offset : offsets) {
MergeIntoFrameState(block, offset.target_offset);
}
}
void MaglevGraphBuilder::VisitForInEnumerate() {
// ForInEnumerate <receiver>
......
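
The builder takes the bytecode's jump-table entries, uses the first entry's case value as case_value_base, and stores each target at index (case_value - case_value_base), which assumes the case values are consecutive. A small standalone sketch of that normalization step (hypothetical helper, not V8 code):

#include <utility>
#include <vector>

// Hypothetical sketch, not V8 code: normalizing a bytecode jump table
// (pairs of {case_value, target_offset}) into the dense targets array the
// Switch node consumes. Assumes consecutive case values, which indexing by
// (case_value - case_value_base) with offsets.size() slots relies on in
// VisitSwitchOnSmiNoFeedback.
std::vector<int> BuildDenseTargets(
    const std::vector<std::pair<int, int>>& offsets, int* case_value_base) {
  std::vector<int> targets(offsets.size());
  if (offsets.empty()) return targets;
  *case_value_base = offsets.front().first;
  for (const auto& [case_value, target_offset] : offsets) {
    targets[case_value - *case_value_base] = target_offset;
  }
  return targets;
}
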
@@ -265,13 +265,18 @@ void MaglevPrintingVisitor::PreProcessGraph(
AddTargetIfNotNext(targets_,
node->Cast<UnconditionalControlNode>()->target(),
*(block_it + 1));
} else if (node->Is<ConditionalControlNode>()) {
AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_true(),
} else if (node->Is<BranchControlNode>()) {
AddTargetIfNotNext(targets_, node->Cast<BranchControlNode>()->if_true(),
*(block_it + 1));
AddTargetIfNotNext(targets_,
node->Cast<ConditionalControlNode>()->if_false(),
AddTargetIfNotNext(targets_, node->Cast<BranchControlNode>()->if_false(),
*(block_it + 1));
} else if (node->Is<Switch>()) {
for (int i = 0; i < node->Cast<Switch>()->size(); i++) {
const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
AddTargetIfNotNext(targets_, target.block_ptr(), *(block_it + 1));
}
BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
AddTargetIfNotNext(targets_, fallthrough_target, *(block_it + 1));
}
}
DCHECK(std::all_of(targets_.begin(), targets_.end(),
@@ -477,11 +482,11 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
PrintPaddedId(os_, graph_labeller, max_node_id_, control_node,
has_fallthrough ? " " : "─");
} else if (control_node->Is<ConditionalControlNode>()) {
} else if (control_node->Is<BranchControlNode>()) {
BasicBlock* true_target =
control_node->Cast<ConditionalControlNode>()->if_true();
control_node->Cast<BranchControlNode>()->if_true();
BasicBlock* false_target =
control_node->Cast<ConditionalControlNode>()->if_false();
control_node->Cast<BranchControlNode>()->if_false();
std::set<size_t> arrows_starting_here;
has_fallthrough |= !AddTargetIfNotNext(
@@ -490,6 +495,23 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node,
targets_, true_target, state.next_block(), &arrows_starting_here);
PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, "─");
} else if (control_node->Is<Switch>()) {
std::set<size_t> arrows_starting_here;
for (int i = 0; i < control_node->Cast<Switch>()->size(); i++) {
const BasicBlockRef& target = control_node->Cast<Switch>()->targets()[i];
has_fallthrough |=
!AddTargetIfNotNext(targets_, target.block_ptr(), state.next_block(),
&arrows_starting_here);
}
BasicBlock* fallthrough_target =
control_node->Cast<Switch>()->fallthrough();
has_fallthrough |=
!AddTargetIfNotNext(targets_, fallthrough_target, state.next_block(),
&arrows_starting_here);
PrintVerticalArrows(os_, targets_, arrows_starting_here);
PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, "─");
} else {
PrintVerticalArrows(os_, targets_);
......
@@ -140,6 +140,7 @@ class MaglevGraphVerifier {
DCHECK_EQ(node->input_count(), 1);
CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
break;
case Opcode::kSwitch:
case Opcode::kCheckedSmiTag:
case Opcode::kChangeInt32ToFloat64:
DCHECK_EQ(node->input_count(), 1);
......
@@ -555,11 +555,21 @@ void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const ConditionalControlNode* node) {
const BranchControlNode* node) {
os << " b" << graph_labeller->BlockId(node->if_true()) << " b"
<< graph_labeller->BlockId(node->if_false());
}
void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const Switch* node) {
for (int i = 0; i < node->size(); i++) {
const BasicBlockRef& target = node->Cast<Switch>()->targets()[i];
os << " b" << graph_labeller->BlockId(target.block_ptr());
}
BasicBlock* fallthrough_target = node->Cast<Switch>()->fallthrough();
os << " b" << graph_labeller->BlockId(fallthrough_target);
}
template <typename NodeT>
void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller,
const NodeT* node) {
@@ -2988,6 +2998,20 @@ void Deopt::PrintParams(std::ostream& os,
os << "(" << DeoptimizeReasonToString(reason()) << ")";
}
void Switch::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(value());
}
void Switch::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(size());
for (int i = 0; i < size(); i++) {
labels[i] = (targets())[i].block_ptr()->label();
}
__ Switch(kScratchRegister, ToRegister(value()), value_base(), labels.get(),
size());
DCHECK_EQ(fallthrough(), state.next_block());
}
void Jump::AllocateVreg(MaglevVregAllocationState* vreg_state) {}
void Jump::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
......
@@ -197,15 +197,19 @@ class CompactInterpreterFrameState;
GAP_MOVE_NODE_LIST(V) \
VALUE_NODE_LIST(V)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
V(BranchIfRootConstant) \
V(BranchIfToBooleanTrue) \
V(BranchIfReferenceCompare) \
V(BranchIfInt32Compare) \
V(BranchIfFloat64Compare) \
V(BranchIfUndefinedOrNull) \
#define BRANCH_CONTROL_NODE_LIST(V) \
V(BranchIfRootConstant) \
V(BranchIfToBooleanTrue) \
V(BranchIfReferenceCompare) \
V(BranchIfInt32Compare) \
V(BranchIfFloat64Compare) \
V(BranchIfUndefinedOrNull) \
V(BranchIfJSReceiver)
#define CONDITIONAL_CONTROL_NODE_LIST(V) \
V(Switch) \
BRANCH_CONTROL_NODE_LIST(V)
#define UNCONDITIONAL_CONTROL_NODE_LIST(V) \
V(Jump) \
V(JumpLoop) \
@@ -255,6 +259,11 @@ static constexpr Opcode kLastGapMoveNodeOpcode =
static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode});
static constexpr Opcode kFirstBranchControlNodeOpcode =
std::min({BRANCH_CONTROL_NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastBranchControlNodeOpcode =
std::max({BRANCH_CONTROL_NODE_LIST(V) kFirstOpcode});
static constexpr Opcode kFirstConditionalControlNodeOpcode =
std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});
static constexpr Opcode kLastConditionalControlNodeOpcode =
@@ -281,6 +290,10 @@ constexpr bool IsConstantNode(Opcode opcode) {
constexpr bool IsGapMoveNode(Opcode opcode) {
return kFirstGapMoveNodeOpcode <= opcode && opcode <= kLastGapMoveNodeOpcode;
}
constexpr bool IsBranchControlNode(Opcode opcode) {
return kFirstBranchControlNodeOpcode <= opcode &&
opcode <= kLastBranchControlNodeOpcode;
}
constexpr bool IsConditionalControlNode(Opcode opcode) {
return kFirstConditionalControlNodeOpcode <= opcode &&
opcode <= kLastConditionalControlNodeOpcode;
@@ -294,6 +307,7 @@ constexpr bool IsUnconditionalControlNode(Opcode opcode) {
class Node;
class ControlNode;
class ConditionalControlNode;
class BranchControlNode;
class UnconditionalControlNode;
class ValueNode;
@@ -846,6 +860,10 @@ constexpr bool NodeBase::Is<ValueNode>() const {
return IsValueNode(opcode());
}
template <>
constexpr bool NodeBase::Is<BranchControlNode>() const {
return IsBranchControlNode(opcode());
}
template <>
constexpr bool NodeBase::Is<ConditionalControlNode>() const {
return IsConditionalControlNode(opcode());
}
@@ -3413,9 +3431,14 @@ class UnconditionalControlNodeT : public UnconditionalControlNode {
class ConditionalControlNode : public ControlNode {
public:
ConditionalControlNode(uint64_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
: ControlNode(bitfield),
ConditionalControlNode(uint64_t bitfield) : ControlNode(bitfield) {}
};
class BranchControlNode : public ConditionalControlNode {
public:
BranchControlNode(uint64_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
: ConditionalControlNode(bitfield),
if_true_(if_true_refs),
if_false_(if_false_refs) {}
@@ -3428,8 +3451,8 @@ class ConditionalControlNode : public ControlNode {
};
template <size_t InputCount, class Derived>
class ConditionalControlNodeT : public ConditionalControlNode {
static_assert(IsConditionalControlNode(opcode_of<Derived>));
class BranchControlNodeT : public BranchControlNode {
static_assert(IsBranchControlNode(opcode_of<Derived>));
static constexpr size_t kInputCount = InputCount;
public:
@@ -3442,10 +3465,9 @@ class ConditionalControlNodeT : public ConditionalControlNode {
}
protected:
explicit ConditionalControlNodeT(uint64_t bitfield,
BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
: ConditionalControlNode(bitfield, if_true_refs, if_false_refs) {
explicit BranchControlNodeT(uint64_t bitfield, BasicBlockRef* if_true_refs,
BasicBlockRef* if_false_refs)
: BranchControlNode(bitfield, if_true_refs, if_false_refs) {
DCHECK_EQ(NodeBase::opcode(), opcode_of<Derived>);
DCHECK_EQ(NodeBase::input_count(), kInputCount);
}
@@ -3574,9 +3596,38 @@ class Deopt : public ControlNode {
DeoptimizeReason reason_;
};
class Switch : public ConditionalControlNode {
public:
explicit Switch(uint64_t bitfield, int value_base, BasicBlockRef* targets,
int size, BasicBlockRef* fallthrough)
: ConditionalControlNode(bitfield),
value_base_(value_base),
targets_(targets),
size_(size),
fallthrough_(fallthrough) {}
int value_base() const { return value_base_; }
const BasicBlockRef* targets() const { return targets_; }
int size() const { return size_; }
BasicBlock* fallthrough() const { return fallthrough_.block_ptr(); }
Input& value() { return input(0); }
void AllocateVreg(MaglevVregAllocationState*);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private:
const int value_base_;
const BasicBlockRef* targets_;
const int size_;
BasicBlockRef fallthrough_;
};
class BranchIfRootConstant
: public ConditionalControlNodeT<1, BranchIfRootConstant> {
using Base = ConditionalControlNodeT<1, BranchIfRootConstant>;
: public BranchControlNodeT<1, BranchIfRootConstant> {
using Base = BranchControlNodeT<1, BranchIfRootConstant>;
public:
explicit BranchIfRootConstant(uint64_t bitfield, BasicBlockRef* if_true_refs,
@@ -3596,8 +3647,8 @@ class BranchIfRootConstant
};
class BranchIfUndefinedOrNull
: public ConditionalControlNodeT<1, BranchIfUndefinedOrNull> {
using Base = ConditionalControlNodeT<1, BranchIfUndefinedOrNull>;
: public BranchControlNodeT<1, BranchIfUndefinedOrNull> {
using Base = BranchControlNodeT<1, BranchIfUndefinedOrNull>;
public:
explicit BranchIfUndefinedOrNull(uint64_t bitfield,
@@ -3612,9 +3663,8 @@ class BranchIfUndefinedOrNull
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
};
class BranchIfJSReceiver
: public ConditionalControlNodeT<1, BranchIfJSReceiver> {
using Base = ConditionalControlNodeT<1, BranchIfJSReceiver>;
class BranchIfJSReceiver : public BranchControlNodeT<1, BranchIfJSReceiver> {
using Base = BranchControlNodeT<1, BranchIfJSReceiver>;
public:
explicit BranchIfJSReceiver(uint64_t bitfield, BasicBlockRef* if_true_refs,
@@ -3629,8 +3679,8 @@ class BranchIfJSReceiver
};
class BranchIfToBooleanTrue
: public ConditionalControlNodeT<1, BranchIfToBooleanTrue> {
using Base = ConditionalControlNodeT<1, BranchIfToBooleanTrue>;
: public BranchControlNodeT<1, BranchIfToBooleanTrue> {
using Base = BranchControlNodeT<1, BranchIfToBooleanTrue>;
public:
explicit BranchIfToBooleanTrue(uint64_t bitfield, BasicBlockRef* if_true_refs,
@@ -3647,8 +3697,8 @@ class BranchIfToBooleanTrue
};
class BranchIfInt32Compare
: public ConditionalControlNodeT<2, BranchIfInt32Compare> {
using Base = ConditionalControlNodeT<2, BranchIfInt32Compare>;
: public BranchControlNodeT<2, BranchIfInt32Compare> {
using Base = BranchControlNodeT<2, BranchIfInt32Compare>;
public:
static constexpr int kLeftIndex = 0;
@@ -3670,8 +3720,8 @@ class BranchIfInt32Compare
};
class BranchIfFloat64Compare
: public ConditionalControlNodeT<2, BranchIfFloat64Compare> {
using Base = ConditionalControlNodeT<2, BranchIfFloat64Compare>;
: public BranchControlNodeT<2, BranchIfFloat64Compare> {
using Base = BranchControlNodeT<2, BranchIfFloat64Compare>;
public:
static constexpr int kLeftIndex = 0;
@@ -3693,8 +3743,8 @@ class BranchIfFloat64Compare
};
class BranchIfReferenceCompare
: public ConditionalControlNodeT<2, BranchIfReferenceCompare> {
using Base = ConditionalControlNodeT<2, BranchIfReferenceCompare>;
: public BranchControlNodeT<2, BranchIfReferenceCompare> {
using Base = BranchControlNodeT<2, BranchIfReferenceCompare>;
public:
static constexpr int kLeftIndex = 0;
......
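
A note on the list split above: Switch is added to CONDITIONAL_CONTROL_NODE_LIST but kept out of the new BRANCH_CONTROL_NODE_LIST, so the Branch* opcodes stay contiguous and both Is<...>() predicates remain simple range checks. A toy illustration of that layout (hypothetical enum, not V8 code):

// Hypothetical sketch, not V8 code: why the opcode lists are split. Keeping
// the Branch* opcodes contiguous, with kSwitch appended only to the wider
// conditional list, lets both predicates stay constant-time range checks
// over the macro-generated enum.
enum class Opcode { kBranchIfTrue, kBranchIfCompare, kSwitch, kJump };

constexpr bool IsBranchControl(Opcode op) {
  return op >= Opcode::kBranchIfTrue && op <= Opcode::kBranchIfCompare;
}
constexpr bool IsConditionalControl(Opcode op) {
  return op >= Opcode::kBranchIfTrue && op <= Opcode::kSwitch;
}

static_assert(IsConditionalControl(Opcode::kSwitch) &&
              !IsBranchControl(Opcode::kSwitch));
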
@@ -46,7 +46,7 @@ ControlNode* NearestPostDominatingHole(ControlNode* node) {
// Conditional control nodes don't cause holes themselves. So, the nearest
// post-dominating hole is the conditional control node's next post-dominating
// hole.
if (node->Is<ConditionalControlNode>()) {
if (node->Is<BranchControlNode>()) {
return node->next_post_dominating_hole();
}
@@ -99,7 +99,8 @@ void ClearDeadFallthroughRegisters(RegisterFrameState<RegisterT> registers,
StraightForwardRegisterAllocator::StraightForwardRegisterAllocator(
MaglevCompilationInfo* compilation_info, Graph* graph)
: compilation_info_(compilation_info), graph_(graph) {
ComputePostDominatingHoles();
// TODO(v8:7700): Extend ComputePostDominatingHoles to support Switch.
// ComputePostDominatingHoles();
AllocateRegisters();
graph_->set_tagged_stack_slots(tagged_.top);
graph_->set_untagged_stack_slots(untagged_.top);
@@ -176,7 +177,7 @@ void StraightForwardRegisterAllocator::ComputePostDominatingHoles() {
// at the target.
control->set_next_post_dominating_hole(
NearestPostDominatingHole(node->target()->control_node()));
} else if (auto node = control->TryCast<ConditionalControlNode>()) {
} else if (auto node = control->TryCast<BranchControlNode>()) {
ControlNode* first =
NearestPostDominatingHole(node->if_true()->control_node());
ControlNode* second =
@@ -795,9 +796,16 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
// Finally, initialize the merge states of branch targets, including the
// fallthrough, with the final state after all allocation
if (auto conditional = node->TryCast<ConditionalControlNode>()) {
if (auto conditional = node->TryCast<BranchControlNode>()) {
InitializeConditionalBranchTarget(conditional, conditional->if_true());
InitializeConditionalBranchTarget(conditional, conditional->if_false());
} else if (Switch* control_node = node->TryCast<Switch>()) {
const BasicBlockRef* targets = control_node->targets();
for (int i = 0; i < control_node->size(); i++) {
InitializeConditionalBranchTarget(control_node, targets[i].block_ptr());
}
InitializeConditionalBranchTarget(control_node,
control_node->fallthrough());
}
}
......