Commit 04224d8c authored by Victor Gomes's avatar Victor Gomes Committed by V8 LUCI CQ

[maglev] Support exception handlers

At the start of the graph builder, we add merge states to exception
handler basic blocks with ExceptionPhis (normal phis with no input,
but with an interpreter register "owner").

Every Node that can throw can also lazy deopt, so we use the
lazy deopt interpreter frame state (IFS) to recover the exception phi
values in a trampoline before jumping to the exception catch block.

Bug: v8:7700
Change-Id: I62fe7f19ce5e89c3df645224ea62f9fc2798207c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3865865
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82935}
parent a81f56c5
......@@ -100,14 +100,9 @@ class BasicBlock {
}
bool has_state() const { return !is_empty_block() && state_ != nullptr; }
#ifdef DEBUG
void set_is_exception_handler_block(bool value) {
is_exception_handler_block_ = value;
}
bool is_exception_handler_block() const {
return is_exception_handler_block_;
return has_state() && state_->is_exception_handler();
}
#endif // DEBUG
private:
bool is_empty_block_ = false;
......@@ -119,10 +114,6 @@ class BasicBlock {
};
BasicBlock* empty_block_predecessor_;
Label label_;
#ifdef DEBUG
bool is_exception_handler_block_ = false;
#endif // DEBUG
};
} // namespace maglev
......
......@@ -57,13 +57,11 @@ class MaglevCodeGenState {
}
inline void DefineLazyDeoptPoint(LazyDeoptInfo* info);
void PushHandlerInfo(ExceptionHandlerInfo* handler) {
handlers_.push_back(handler);
}
const std::vector<ExceptionHandlerInfo*>& handlers() const {
return handlers_;
}
inline void DefineExceptionHandlerPoint(ExceptionHandlerInfo* info);
void PushHandlerInfo(NodeBase* node) { handlers_.push_back(node); }
const std::vector<NodeBase*>& handlers() const { return handlers_; }
inline void DefineExceptionHandlerPoint(NodeBase* node);
inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node);
compiler::NativeContextRef native_context() const {
return broker()->target_native_context();
......@@ -119,7 +117,7 @@ class MaglevCodeGenState {
std::vector<DeferredCodeInfo*> deferred_code_;
std::vector<EagerDeoptInfo*> eager_deopts_;
std::vector<LazyDeoptInfo*> lazy_deopts_;
std::vector<ExceptionHandlerInfo*> handlers_;
std::vector<NodeBase*> handlers_;
int untagged_slots_ = 0;
int tagged_slots_ = 0;
......@@ -166,11 +164,17 @@ inline void MaglevCodeGenState::DefineLazyDeoptPoint(LazyDeoptInfo* info) {
safepoint_table_builder()->DefineSafepoint(masm());
}
inline void MaglevCodeGenState::DefineExceptionHandlerPoint(
ExceptionHandlerInfo* info) {
// Records the current pc as the handler-table entry point for |node|'s
// exception handler (if it has a live one) and queues the node so that its
// trampoline is emitted later (see EmitExceptionHandlersTrampolines).
inline void MaglevCodeGenState::DefineExceptionHandlerPoint(NodeBase* node) {
  ExceptionHandlerInfo* info = node->exception_handler_info();
  // Nodes inside no try-region carry a "no handler" marker; nothing to do.
  if (!info->HasExceptionHandler()) return;
  info->pc_offset = masm()->pc_offset_for_safepoint();
  PushHandlerInfo(node);
}
// Convenience helper for throwing nodes: every node that can throw can also
// lazy deopt (OpProperties::can_throw() implies can_lazy_deopt()), so both
// points are defined together.
inline void MaglevCodeGenState::DefineExceptionHandlerAndLazyDeoptPoint(
    NodeBase* node) {
  DefineExceptionHandlerPoint(node);
  DefineLazyDeoptPoint(node->lazy_deopt_info());
}
} // namespace maglev
......
......@@ -8,6 +8,8 @@
#include "src/base/hashmap.h"
#include "src/codegen/code-desc.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/register.h"
#include "src/codegen/reglist.h"
#include "src/codegen/safepoint-table.h"
......@@ -429,6 +431,207 @@ class ParallelMoveResolver {
bool scratch_has_cycle_start_ = false;
};
// Builds the trampoline code executed when a node inside a try-region
// throws: it moves the values the catch block expects (the values of its
// input-less "exception phis") from the throwing node's lazy deopt frame
// state into the phis' allocated locations, materialising untagged
// Int32/Float64 values into tagged ones on the way, and then jumps to the
// catch block.
class ExceptionHandlerTrampolineBuilder {
 public:
  explicit ExceptionHandlerTrampolineBuilder(MaglevCodeGenState* code_gen_state)
      : code_gen_state_(code_gen_state) {}

  // Emits the trampoline for one throwing |node|. The node must have a live
  // exception handler attached.
  void EmitTrampolineFor(NodeBase* node) {
    DCHECK(node->properties().can_throw());

    ExceptionHandlerInfo* handler_info = node->exception_handler_info();
    DCHECK(handler_info->HasExceptionHandler());

    BasicBlock* block = handler_info->catch_block.block_ptr();
    LazyDeoptInfo* deopt_info = node->lazy_deopt_info();

    // The handler table points here (see DefineExceptionHandlerPoint /
    // EmitMetadata).
    __ bind(&handler_info->trampoline_entry);
    ClearState();
    // TODO(v8:7700): Handle inlining.
    RecordMoves(deopt_info->unit, block, deopt_info->state.register_frame);
    // We do moves that need to materialise values first, since we might need
    // to call a builtin to create a HeapNumber, and therefore we would need
    // to spill all registers.
    DoMaterialiseMoves();
    // Move the rest; no builtin is called from here on, so registers are no
    // longer clobbered.
    DoDirectMoves();
    // Jump to the catch block.
    __ jmp(block->label());
  }

 private:
  MaglevCodeGenState* code_gen_state_;
  // A move's target is an exception phi's assigned location; its source is
  // the value recorded for the phi's owner register in the lazy deopt frame
  // state. The referenced ValueLocation outlives this builder (it lives in
  // the phi node).
  using Move = std::pair<const ValueLocation&, ValueNode*>;
  base::SmallVector<Move, 16> direct_moves_;
  base::SmallVector<Move, 16> materialisation_moves_;
  // Whether kReturnRegister0 (holding the thrown exception object) must be
  // preserved across the materialisation builtin calls.
  bool save_accumulator_ = false;

  MacroAssembler* masm() { return code_gen_state_->masm(); }

  // Resets per-trampoline state so the builder can be reused for many nodes.
  void ClearState() {
    direct_moves_.clear();
    materialisation_moves_.clear();
    save_accumulator_ = false;
  }

  // Partitions the required moves into direct moves (already-tagged values
  // and Int32 constants, which become Smis) and materialisation moves
  // (values that need a NewHeapNumber builtin call to become tagged).
  void RecordMoves(const MaglevCompilationUnit& unit, BasicBlock* block,
                   const CompactInterpreterFrameState* register_frame) {
    for (Phi* phi : *block->phis()) {
      DCHECK_EQ(phi->input_count(), 0);
      if (phi->owner() == interpreter::Register::virtual_accumulator()) {
        // If the accumulator is live, then it is the exception object located
        // at kReturnRegister0. This is also the first phi in the list.
        DCHECK_EQ(phi->result().AssignedGeneralRegister(), kReturnRegister0);
        save_accumulator_ = true;
        continue;
      }
      if (!phi->has_valid_live_range()) continue;
      ValueNode* value = register_frame->GetValueOf(phi->owner(), unit);
      DCHECK_NOT_NULL(value);
      switch (value->properties().value_representation()) {
        case ValueRepresentation::kTagged:
          // All registers should have been spilled due to the call.
          DCHECK(!value->allocation().IsRegister());
          direct_moves_.emplace_back(phi->result(), value);
          break;
        case ValueRepresentation::kInt32:
          if (value->allocation().IsConstant()) {
            direct_moves_.emplace_back(phi->result(), value);
          } else {
            materialisation_moves_.emplace_back(phi->result(), value);
          }
          break;
        case ValueRepresentation::kFloat64:
          materialisation_moves_.emplace_back(phi->result(), value);
          break;
      }
    }
  }

  // Performs the recorded materialisation moves. Each one may call the
  // NewHeapNumber builtin, so intermediate results are kept on the stack.
  void DoMaterialiseMoves() {
    if (materialisation_moves_.size() == 0) return;
    if (save_accumulator_) {
      __ Push(kReturnRegister0);
    }
    for (auto it = materialisation_moves_.begin();
         it < materialisation_moves_.end(); it++) {
      switch (it->second->properties().value_representation()) {
        case ValueRepresentation::kInt32: {
          EmitMoveInt32ToReturnValue0(it->second);
          break;
        }
        case ValueRepresentation::kFloat64:
          EmitMoveFloat64ToReturnValue0(it->second);
          break;
        case ValueRepresentation::kTagged:
          // Tagged values are handled by DoDirectMoves.
          UNREACHABLE();
      }
      if (it->first.operand().IsStackSlot()) {
        // If the target is in a stack slot, we can immediately move
        // the result to it.
        __ movq(ToMemOperand(it->first), kReturnRegister0);
      } else {
        // We spill the result to the stack, in order to be able to call the
        // NewHeapNumber builtin again, however we don't need to push the
        // result of the last one.
        if (it != materialisation_moves_.end() - 1) {
          __ Push(kReturnRegister0);
        }
      }
    }
    // If the last move target is a register, the result should be in
    // kReturnValue0, so we emit a simple move. Otherwise it has already been
    // moved.
    const ValueLocation& last_move_target =
        materialisation_moves_.rbegin()->first;
    if (last_move_target.operand().IsRegister()) {
      __ Move(last_move_target.AssignedGeneralRegister(), kReturnRegister0);
    }
    // And then pop the rest.
    for (auto it = materialisation_moves_.rbegin() + 1;
         it < materialisation_moves_.rend(); it++) {
      if (it->first.operand().IsRegister()) {
        __ Pop(it->first.AssignedGeneralRegister());
      }
    }
    if (save_accumulator_) {
      __ Pop(kReturnRegister0);
    }
  }

  // Performs the moves that need no builtin call: tagged stack values,
  // Int32 constants (tagged as Smis), and other tagged constants.
  void DoDirectMoves() {
    for (auto& [target, value] : direct_moves_) {
      if (value->allocation().IsConstant()) {
        if (Int32Constant* constant = value->TryCast<Int32Constant>()) {
          EmitMove(target, Smi::FromInt(constant->value()));
        } else {
          // Int32 and Float64 constants should have already been dealt with.
          DCHECK_EQ(value->properties().value_representation(),
                    ValueRepresentation::kTagged);
          EmitConstantLoad(target, value);
        }
      } else {
        EmitMove(target, ToMemOperand(value));
      }
    }
  }

  // Tags the spilled int32 |value| into kReturnRegister0: as a Smi if it
  // fits, otherwise as a freshly allocated HeapNumber.
  void EmitMoveInt32ToReturnValue0(ValueNode* value) {
    // We consider Int32Constants together with tagged values.
    DCHECK(!value->allocation().IsConstant());
    using D = NewHeapNumberDescriptor;
    Label done;
    __ movq(kReturnRegister0, ToMemOperand(value));
    // Smi-tag by doubling; overflow means the value does not fit in a Smi.
    __ addl(kReturnRegister0, kReturnRegister0);
    __ j(no_overflow, &done);
    // If we overflow, instead of bailing out (deopting), we change
    // representation to a HeapNumber.
    __ Cvtlsi2sd(D::GetDoubleRegisterParameter(D::kValue), ToMemOperand(value));
    __ CallBuiltin(Builtin::kNewHeapNumber);
    __ bind(&done);
  }

  // Boxes the float64 |value| (spilled or constant) into a HeapNumber in
  // kReturnRegister0.
  void EmitMoveFloat64ToReturnValue0(ValueNode* value) {
    using D = NewHeapNumberDescriptor;
    if (Float64Constant* constant = value->TryCast<Float64Constant>()) {
      __ Move(D::GetDoubleRegisterParameter(D::kValue), constant->value());
    } else {
      __ Movsd(D::GetDoubleRegisterParameter(D::kValue), ToMemOperand(value));
    }
    __ CallBuiltin(Builtin::kNewHeapNumber);
  }

  MemOperand ToMemOperand(ValueNode* node) {
    DCHECK(node->allocation().IsAnyStackSlot());
    return code_gen_state_->ToMemOperand(node->allocation());
  }

  MemOperand ToMemOperand(const ValueLocation& location) {
    DCHECK(location.operand().IsStackSlot());
    return code_gen_state_->ToMemOperand(location.operand());
  }

  // Moves |src| into |dst|, going through kScratchRegister when the target
  // is a stack slot.
  template <typename Operand>
  void EmitMove(const ValueLocation& dst, Operand src) {
    if (dst.operand().IsRegister()) {
      __ Move(dst.AssignedGeneralRegister(), src);
    } else {
      __ Move(kScratchRegister, src);
      __ movq(ToMemOperand(dst), kScratchRegister);
    }
  }

  // Loads the tagged constant |value| into |dst|.
  void EmitConstantLoad(const ValueLocation& dst, ValueNode* value) {
    DCHECK(value->allocation().IsConstant());
    if (dst.operand().IsRegister()) {
      value->LoadToRegister(code_gen_state_, dst.AssignedGeneralRegister());
    } else {
      value->LoadToRegister(code_gen_state_, kScratchRegister);
      __ movq(ToMemOperand(dst), kScratchRegister);
    }
  }
};
class MaglevCodeGeneratingNodeProcessor {
public:
explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
......@@ -764,6 +967,7 @@ class MaglevCodeGeneratorImpl final {
processor_.ProcessGraph(graph_);
EmitDeferredCode();
EmitDeopts();
EmitExceptionHandlersTrampolines();
}
void EmitDeferredCode() {
......@@ -809,6 +1013,15 @@ class MaglevCodeGeneratorImpl final {
}
}
  // Emits one trampoline per queued throwing node (after the main code,
  // deferred code and deopts); see ExceptionHandlerTrampolineBuilder.
  void EmitExceptionHandlersTrampolines() {
    if (code_gen_state_.handlers().size() == 0) return;
    ExceptionHandlerTrampolineBuilder builder(&code_gen_state_);
    __ RecordComment("-- Exception handlers trampolines");
    for (NodeBase* node : code_gen_state_.handlers()) {
      builder.EmitTrampolineFor(node);
    }
  }
void EmitMetadata() {
// Final alignment before starting on the metadata section.
masm()->Align(Code::kMetadataAlignment);
......@@ -817,10 +1030,10 @@ class MaglevCodeGeneratorImpl final {
// Exception handler table.
handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
for (ExceptionHandlerInfo* info : code_gen_state_.handlers()) {
HandlerTable::EmitReturnEntry(
masm(), info->pc_offset,
info->catch_block.block_ptr()->label()->pos());
for (NodeBase* node : code_gen_state_.handlers()) {
ExceptionHandlerInfo* info = node->exception_handler_info();
HandlerTable::EmitReturnEntry(masm(), info->pc_offset,
info->trampoline_entry.pos());
}
}
......
......@@ -552,14 +552,6 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate,
top_level_unit->feedback().object()->Print(std::cout);
}
// TODO(v8:7700): Support exceptions in maglev. We currently bail if exception
// handler table is non-empty.
if (compilation_info->toplevel_compilation_unit()
->bytecode()
.handler_table_size() > 0) {
return;
}
Graph* graph = Graph::New(compilation_info->zone());
MaglevGraphBuilder graph_builder(
......
......@@ -82,6 +82,18 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
*compilation_unit_, offset, NumPredecessors(offset), liveness,
&loop_info);
}
if (bytecode().handler_table_size() > 0) {
HandlerTable table(*bytecode().object());
for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
int offset = table.GetRangeHandler(i);
const compiler::BytecodeLivenessState* liveness =
GetInLivenessFor(offset);
DCHECK_EQ(NumPredecessors(offset), 0);
merge_states_[offset] = zone()->New<MergePointInterpreterFrameState>(
*compilation_unit_, liveness, offset);
}
}
}
void MaglevGraphBuilder::StartPrologue() {
......
......@@ -75,6 +75,30 @@ class MaglevGraphBuilder {
return result;
}
  // Sets up the builder state at the start of a catch block. Unlike
  // ProcessMergePoint, such a block has no predecessors and its frame state
  // consists solely of input-less exception phis.
  void ProcessMergePointAtExceptionHandlerStart(int offset) {
    MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
    DCHECK_EQ(merge_state.predecessor_count(), 0);

    // Copy state.
    current_interpreter_frame_.CopyFrom(*compilation_unit_, merge_state);

    // Merges aren't simple fallthroughs, so we should reset the checkpoint
    // validity.
    latest_checkpointed_state_.reset();

    // Register exception phis with the labeller (needed only for tracing).
    if (has_graph_labeller()) {
      for (Phi* phi : *merge_states_[offset]->phis()) {
        graph_labeller()->RegisterNode(phi);
        if (FLAG_trace_maglev_graph_building) {
          std::cout << " " << phi << " "
                    << PrintNodeLabel(graph_labeller(), phi) << ": "
                    << PrintNode(graph_labeller(), phi) << std::endl;
        }
      }
    }
  }
void ProcessMergePoint(int offset) {
// First copy the merge state to be the current state.
MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
......@@ -131,17 +155,6 @@ class MaglevGraphBuilder {
return merge_states_[offset] != nullptr;
}
// Return true if current offset is the beginning of a catch block, that
// is, it is the offset handler in the exception handler table in the
// bytecode array.
bool IsHandlerOffset(int offset) const {
HandlerTable table(*bytecode().object());
for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
if (offset == table.GetRangeHandler(i)) return true;
}
return false;
}
// Called when a block is killed by an unconditional eager deopt.
void EmitUnconditionalDeopt(DeoptimizeReason reason) {
// Create a block rather than calling finish, since we don't yet know the
......@@ -222,24 +235,25 @@ class MaglevGraphBuilder {
graph()->last_block(), offset);
}
if (FLAG_trace_maglev_graph_building) {
std::cout << "== New block (merge) ==" << std::endl;
auto detail =
merge_state->is_exception_handler() ? "exception handler" : "merge";
std::cout << "== New block (" << detail << ") ==" << std::endl;
}
ProcessMergePoint(offset);
StartNewBlock(offset);
// If we have no predecessor, then we can be the start of an exception
// handler block.
} else if (predecessors_[offset] == 0 && IsHandlerOffset(offset)) {
// If we have no reference to this block, then the exception handler is
// dead.
if (!jump_targets_[offset].has_ref()) {
MarkBytecodeDead();
return;
if (merge_state->is_exception_handler()) {
DCHECK_EQ(predecessors_[offset], 0);
// If we have no reference to this block, then the exception handler is
// dead.
if (!jump_targets_[offset].has_ref()) {
MarkBytecodeDead();
return;
}
ProcessMergePointAtExceptionHandlerStart(offset);
} else {
ProcessMergePoint(offset);
}
StartNewBlock(offset);
#ifdef DEBUG
current_block_->set_is_exception_handler_block(true);
#endif // DEBUG
} else if (V8_UNLIKELY(current_block_ == nullptr)) {
// If we don't have a current block, the bytecode must be dead (because of
// some earlier deopt). Mark this bytecode dead too and return.
......@@ -364,10 +378,13 @@ class MaglevGraphBuilder {
if constexpr (NodeT::kProperties.can_throw()) {
if (catch_block_stack_.size() > 0) {
// Inside a try-block.
new (node->exception_handler_info()) ExceptionHandlerInfo(
&jump_targets_[catch_block_stack_.top().handler]);
int handler_offset = catch_block_stack_.top().handler;
new (node->exception_handler_info())
ExceptionHandlerInfo(&jump_targets_[handler_offset]);
} else {
// Patch no exception handler marker.
// TODO(victorgomes): Avoid allocating exception handler data in this
// case.
new (node->exception_handler_info()) ExceptionHandlerInfo();
}
}
......
......@@ -357,11 +357,9 @@ void MaglevPrintingVisitor::PreProcessBasicBlock(
int block_id = graph_labeller->BlockId(block);
os_ << "Block b" << block_id;
#ifdef DEBUG
if (block->is_exception_handler_block()) {
os_ << " (exception handler)";
}
#endif // DEBUG
os_ << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)->set_padding(1);
......@@ -442,16 +440,68 @@ void PrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
});
os << "}\n";
}
void MaybePrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets,
NodeBase* node, const ProcessingState& state,
int max_node_id) {
// Prints, for a throwing |node|, the interpreter registers that flow into its
// catch block's exception phis, mapped to the values they would take from the
// node's lazy deopt frame state.
template <typename NodeT>
void PrintExceptionHandlerPoint(std::ostream& os,
                                std::vector<BasicBlock*> targets, NodeT* node,
                                const ProcessingState& state, int max_node_id) {
  // If no handler info, then we cannot throw.
  ExceptionHandlerInfo* info = node->exception_handler_info();
  if (!info->HasExceptionHandler()) return;

  BasicBlock* block = info->catch_block.block_ptr();
  DCHECK(block->is_exception_handler_block());

  Phi* first_phi = block->phis()->first();
  if (first_phi == nullptr) {
    // No phis in the block.
    return;
  }
  // All exception phis of a block share the same merge offset; use the
  // first one to label the handler.
  int handler_offset = first_phi->merge_offset();

  // The exception handler liveness should be a subset of the
  // lazy_deopt_info one.
  auto* liveness = block->state()->frame_state().liveness();
  LazyDeoptInfo* deopt_info = node->lazy_deopt_info();

  MaglevGraphLabeller* graph_labeller = state.graph_labeller();

  PrintVerticalArrows(os, targets);
  PrintPadding(os, graph_labeller, max_node_id, 0);
  os << " ↳ throw @" << handler_offset << " : {";
  bool first = true;
  deopt_info->state.register_frame->ForEachValue(
      deopt_info->unit, [&](ValueNode* node, interpreter::Register reg) {
        if (!reg.is_parameter() && !liveness->RegisterIsLive(reg.index())) {
          // Skip, since not live at the handler offset.
          return;
        }
        if (first) {
          first = false;
        } else {
          os << ", ";
        }
        os << reg.ToString() << ":" << PrintNodeLabel(graph_labeller, node);
      });
  os << "}\n";
}
void MaybePrintLazyDeoptOrExceptionHandler(std::ostream& os,
std::vector<BasicBlock*> targets,
NodeBase* node,
const ProcessingState& state,
int max_node_id) {
switch (node->opcode()) {
#define CASE(Name) \
case Opcode::k##Name: \
if constexpr (Name::kProperties.can_lazy_deopt()) { \
PrintLazyDeopt<Name>(os, targets, node->Cast<Name>(), state, \
max_node_id); \
} \
#define CASE(Name) \
case Opcode::k##Name: \
if constexpr (Name::kProperties.can_lazy_deopt()) { \
PrintLazyDeopt<Name>(os, targets, node->Cast<Name>(), state, \
max_node_id); \
} \
if constexpr (Name::kProperties.can_throw()) { \
PrintExceptionHandlerPoint<Name>(os, targets, node->Cast<Name>(), state, \
max_node_id); \
} \
break;
NODE_BASE_LIST(CASE)
#undef CASE
......@@ -465,15 +515,20 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) {
PrintVerticalArrows(os_, targets_);
PrintPaddedId(os_, graph_labeller, max_node_id_, phi);
os_ << "φ (";
// Manually walk Phi inputs to print just the node labels, without
// input locations (which are shown in the predecessor block's gap
// moves).
for (int i = 0; i < phi->input_count(); ++i) {
if (i > 0) os_ << ", ";
os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
if (phi->input_count() == 0) {
os_ << "φₑ " << phi->owner().ToString();
} else {
os_ << "φ (";
// Manually walk Phi inputs to print just the node labels, without
// input locations (which are shown in the predecessor block's gap
// moves).
for (int i = 0; i < phi->input_count(); ++i) {
if (i > 0) os_ << ", ";
os_ << PrintNodeLabel(graph_labeller, phi->input(i).node());
}
os_ << ")";
}
os_ << ") → " << phi->result().operand() << "\n";
os_ << " → " << phi->result().operand() << "\n";
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(MaxIdWidth(graph_labeller, max_node_id_, 2));
......@@ -491,7 +546,8 @@ void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) {
MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
->set_padding(MaxIdWidth(graph_labeller, max_node_id_, 2));
MaybePrintLazyDeopt(os_, targets_, node, state, max_node_id_);
MaybePrintLazyDeoptOrExceptionHandler(os_, targets_, node, state,
max_node_id_);
}
void MaglevPrintingVisitor::Process(ControlNode* control_node,
......
......@@ -316,9 +316,11 @@ class CompactInterpreterFrameState {
const compiler::BytecodeLivenessState* liveness() const { return liveness_; }
ValueNode*& accumulator(const MaglevCompilationUnit& info) {
DCHECK(liveness_->AccumulatorIsLive());
return live_registers_and_accumulator_[size(info) - 1];
}
ValueNode* accumulator(const MaglevCompilationUnit& info) const {
DCHECK(liveness_->AccumulatorIsLive());
return live_registers_and_accumulator_[size(info) - 1];
}
......@@ -329,6 +331,33 @@ class CompactInterpreterFrameState {
return live_registers_and_accumulator_[info.parameter_count()];
}
  // Returns the value this frame state records for |reg|, or nullptr if the
  // register is not live here. Context, accumulator and parameters are
  // always stored; plain locals are looked up via the liveness bitmap, since
  // only live registers are stored compactly.
  ValueNode* GetValueOf(interpreter::Register reg,
                        const MaglevCompilationUnit& info) const {
    DCHECK(reg.is_valid());
    if (reg == interpreter::Register::current_context()) {
      return context(info);
    }
    if (reg == interpreter::Register::virtual_accumulator()) {
      return accumulator(info);
    }
    if (reg.is_parameter()) {
      DCHECK_LT(reg.ToParameterIndex(), info.parameter_count());
      return live_registers_and_accumulator_[reg.ToParameterIndex()];
    }
    // Locals are stored after the parameters and the context, in liveness
    // order; |live_reg| counts how many live registers precede |reg|.
    int live_reg = 0;
    // TODO(victorgomes): See if we can do better than a linear search here.
    for (int register_index : *liveness_) {
      if (reg == interpreter::Register(register_index)) {
        return live_registers_and_accumulator_[info.parameter_count() +
                                               context_register_count_ +
                                               live_reg];
      }
      live_reg++;
    }
    // No value in this frame state.
    return nullptr;
  }
size_t size(const MaglevCompilationUnit& info) const {
return SizeFor(info, liveness_);
}
......@@ -376,6 +405,11 @@ class MergePointRegisterState {
class MergePointInterpreterFrameState {
public:
enum class BasicBlockType {
kDefault,
kLoopHeader,
kExceptionHandlerStart,
};
void CheckIsLoopPhiIfNeeded(const MaglevCompilationUnit& compilation_unit,
int merge_offset, interpreter::Register reg,
ValueNode* value) {
......@@ -401,7 +435,7 @@ class MergePointInterpreterFrameState {
const compiler::BytecodeLivenessState* liveness)
: predecessor_count_(predecessor_count),
predecessors_so_far_(1),
is_loop_header_(false),
basic_block_type_(BasicBlockType::kDefault),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
frame_state_(info, liveness, state),
known_node_aspects_(state.known_node_aspects().Clone(info.zone())) {
......@@ -414,7 +448,7 @@ class MergePointInterpreterFrameState {
const compiler::LoopInfo* loop_info)
: predecessor_count_(predecessor_count),
predecessors_so_far_(0),
is_loop_header_(true),
basic_block_type_(BasicBlockType::kLoopHeader),
predecessors_(info.zone()->NewArray<BasicBlock*>(predecessor_count)),
frame_state_(info, liveness) {
auto& assignments = loop_info->assignments();
......@@ -436,6 +470,36 @@ class MergePointInterpreterFrameState {
DCHECK(!frame_state_.liveness()->AccumulatorIsLive());
}
  // Constructs the frame state for an exception handler start block at
  // |handler_offset|: it has no predecessors, and every live register (plus
  // the context and, if live, the accumulator) is represented by an
  // input-less exception phi.
  MergePointInterpreterFrameState(
      const MaglevCompilationUnit& info,
      const compiler::BytecodeLivenessState* liveness, int handler_offset)
      : predecessor_count_(0),
        predecessors_so_far_(0),
        basic_block_type_(BasicBlockType::kExceptionHandlerStart),
        predecessors_(nullptr),
        frame_state_(info, liveness),
        known_node_aspects_(info.zone()->New<KnownNodeAspects>(info.zone())) {
    // If the accumulator is live, the ExceptionPhi associated to it is the
    // first one in the block. That ensures it gets kReturnRegister0 in the
    // register allocator. See
    // StraightForwardRegisterAllocator::AllocateRegisters.
    if (frame_state_.liveness()->AccumulatorIsLive()) {
      frame_state_.accumulator(info) = NewExceptionPhi(
          info.zone(), interpreter::Register::virtual_accumulator(),
          handler_offset);
    }
    frame_state_.ForEachParameter(
        info, [&](ValueNode*& entry, interpreter::Register reg) {
          entry = NewExceptionPhi(info.zone(), reg, handler_offset);
        });
    frame_state_.context(info) = NewExceptionPhi(
        info.zone(), interpreter::Register::current_context(), handler_offset);
    frame_state_.ForEachLocal(
        info, [&](ValueNode*& entry, interpreter::Register reg) {
          entry = NewExceptionPhi(info.zone(), reg, handler_offset);
        });
  }
// Merges an unmerged framestate with a possibly merged framestate into |this|
// framestate.
void Merge(MaglevCompilationUnit& compilation_unit,
......@@ -544,7 +608,7 @@ class MergePointInterpreterFrameState {
DCHECK(is_unmerged_loop());
MergeDead(compilation_unit, merge_offset);
// This means that this is no longer a loop.
is_loop_header_ = false;
basic_block_type_ = BasicBlockType::kDefault;
}
const CompactInterpreterFrameState& frame_state() const {
......@@ -569,20 +633,25 @@ class MergePointInterpreterFrameState {
return predecessors_[i];
}
bool is_loop() const { return is_loop_header_; }
bool is_loop() const {
return basic_block_type_ == BasicBlockType::kLoopHeader;
}
bool is_exception_handler() const {
return basic_block_type_ == BasicBlockType::kExceptionHandlerStart;
}
bool is_unmerged_loop() const {
// If this is a loop and not all predecessors are set, then the loop isn't
// merged yet.
DCHECK_GT(predecessor_count_, 0);
return is_loop_header_ && predecessors_so_far_ < predecessor_count_;
return is_loop() && predecessors_so_far_ < predecessor_count_;
}
bool is_unreachable_loop() const {
// If there is only one predecessor, and it's not set, then this is a loop
// merge with no forward control flow entering it.
return is_loop_header_ && predecessor_count_ == 1 &&
predecessors_so_far_ == 0;
return is_loop() && predecessor_count_ == 1 && predecessors_so_far_ == 0;
}
private:
......@@ -765,9 +834,19 @@ class MergePointInterpreterFrameState {
return result;
}
  // Creates an input-less phi ("exception phi") identified only by its owner
  // register |reg|. Only valid for exception handler blocks, which have no
  // predecessors (hence zero phi inputs).
  ValueNode* NewExceptionPhi(Zone* zone, interpreter::Register reg,
                             int handler_offset) {
    DCHECK_EQ(predecessors_so_far_, 0);
    DCHECK_EQ(predecessor_count_, 0);
    DCHECK_NULL(predecessors_);
    Phi* result = Node::New<Phi>(zone, 0, reg, handler_offset);
    phis_.Add(result);
    return result;
  }
int predecessor_count_;
int predecessors_so_far_;
bool is_loop_header_;
BasicBlockType basic_block_type_;
Phi::List phis_;
BasicBlock** predecessors_;
......
This diff is collapsed.
......@@ -429,7 +429,9 @@ class OpProperties {
constexpr bool can_lazy_deopt() const {
return kCanLazyDeoptBit::decode(bitfield_);
}
constexpr bool can_throw() const { return kCanThrowBit::decode(bitfield_); }
constexpr bool can_throw() const {
return kCanThrowBit::decode(bitfield_) && can_lazy_deopt();
}
constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
constexpr bool non_memory_side_effects() const {
......@@ -466,7 +468,7 @@ class OpProperties {
return OpProperties(kCanLazyDeoptBit::encode(true));
}
static constexpr OpProperties Throw() {
return OpProperties(kCanThrowBit::encode(true));
return OpProperties(kCanThrowBit::encode(true)) | LazyDeopt();
}
static constexpr OpProperties Reading() {
return OpProperties(kCanReadBit::encode(true));
......@@ -496,8 +498,7 @@ class OpProperties {
return OpProperties(kNeedsRegisterSnapshotBit::encode(true));
}
static constexpr OpProperties JSCall() {
return Call() | NonMemorySideEffects() | LazyDeopt() |
OpProperties::Throw();
return Call() | NonMemorySideEffects() | LazyDeopt() | Throw();
}
static constexpr OpProperties AnySideEffects() {
return Reading() | Writing() | NonMemorySideEffects();
......@@ -672,6 +673,7 @@ class ExceptionHandlerInfo {
}
BasicBlockRef catch_block;
Label trampoline_entry;
int pc_offset;
};
......
......@@ -313,7 +313,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
BasicBlock* block = *block_it_;
// Restore mergepoint state.
if (block->has_state()) {
if (block->has_state() && !block->state()->is_exception_handler()) {
InitializeRegisterValues(block->state()->register_state());
} else if (block->is_empty_block()) {
InitializeRegisterValues(block->empty_block_register_state());
......@@ -368,12 +368,29 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
// location.
for (Phi* phi : *block->phis()) {
// Ignore dead phis.
// TODO(leszeks): We should remove dead phis entirely and turn this into
// a DCHECK.
// TODO(leszeks): We should remove dead phis entirely and turn this
// into a DCHECK.
if (!phi->has_valid_live_range()) continue;
phi->SetNoSpillOrHint();
TryAllocateToInput(phi);
}
if (block->is_exception_handler_block()) {
// If we are in exception handler block, then we find the ExceptionPhi
// (the first one by default) that is marked with the
// virtual_accumulator and force kReturnRegister0. This corresponds to
// the exception message object.
Phi* phi = block->phis()->first();
DCHECK_EQ(phi->input_count(), 0);
if (phi->owner() == interpreter::Register::virtual_accumulator()) {
phi->result().SetAllocated(ForceAllocate(kReturnRegister0, phi));
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->Process(
phi, ProcessingState(compilation_info_, block_it_));
printing_visitor_->os() << "phi (exception message object) "
<< phi->result().operand() << std::endl;
}
}
}
// Secondly try to assign the phi to a free register.
for (Phi* phi : *block->phis()) {
// Ignore dead phis.
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --maglev
// This example creates a simple exception handler block where the trampoline
// has an int32 value and needs to convert it to a tagged value.
// NOTE: the exact shape of these functions is load-bearing — they are
// crafted to exercise specific Maglev trampoline paths, so don't restructure.
function foo_int32() {
  let x = 1;
  try {
    // At the throw, x holds an untagged int32 result of the addition.
    x = x + x;
    throw "Error";
  } catch {
    return x;
  }
}
%PrepareFunctionForOptimization(foo_int32);
assertEquals(foo_int32(), 2);
%OptimizeMaglevOnNextCall(foo_int32);
// Same result after Maglev compilation.
assertEquals(foo_int32(), 2);
// This example creates a simple exception handler block where the trampoline
// has an int32 value that overflows and needs to create a HeapNumber.
function foo_int32_overflow(x) {
  try {
    x = x + x;
    throw "Error";
  } catch {
    return x;
  }
}
%PrepareFunctionForOptimization(foo_int32_overflow);
assertEquals(foo_int32_overflow(1), 2);
%OptimizeMaglevOnNextCall(foo_int32_overflow);
// 0x3FFFFFFF doubled no longer fits the tagged int32 range, so the
// trampoline must box the result as a HeapNumber.
assertEquals(foo_int32_overflow(0x3FFFFFFF), 0x7FFFFFFE);
// If we call it with a HeapNumber, we deopt before the exception:
assertTrue(%ActiveTierIsMaglev(foo_int32_overflow));
assertEquals(foo_int32_overflow(1.1), 2.2);
assertFalse(%ActiveTierIsMaglev(foo_int32_overflow));
// This example creates a simple exception handler block where the trampoline
// has a float64 value and needs to convert it to a tagged value.
function foo_float64() {
  let x = 1.1;
  try {
    // At the throw, x holds an untagged float64 result of the addition.
    x = x + x;
    throw "Error";
  } catch {
    return x;
  }
}
%PrepareFunctionForOptimization(foo_float64);
assertEquals(foo_float64(), 2.2);
%OptimizeMaglevOnNextCall(foo_float64);
// Same result after Maglev compilation.
assertEquals(foo_float64(), 2.2);
// Combination of previous examples with a big number of registers.
// This creates a _quite_ large trampoline (one move per live register).
function foo() {
  let x = 1;
  let y = 1.1;
  // Many live tagged values...
  let a, b, c, d, e, f, g, h;
  a = b = c = d = e = f = g = h = 0;
  let p, q, r, s, t;
  try {
    // ...plus an int32 (x) and a float64 (y) that need materialisation.
    x = x + x;
    y = y + y;
    p = q = r = s = t = x;
    throw "Error";
  } catch {
    return x + y + a + b + c + d + e + f + g + h
        + p + q + r + s + t;
  }
}
%PrepareFunctionForOptimization(foo);
assertEquals(foo(), 14.2);
%OptimizeMaglevOnNextCall(foo);
// Same result after Maglev compilation.
assertEquals(foo(), 14.2);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment