Commit 0d4e9860 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Port eager deopts to use src/deoptimizer

Rather than emitting eager deopt code inline, use the full
DeoptimizationData+TranslationArray mechanism in maglev, for consistency
with TurboFan and to simplify implementing lazy deopts in the future.

Bug: v8:7700
Change-Id: I67282b27493772c78ad28feaa4ad6a2c35c8e239
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545169
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79600}
parent ebfa35bc
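The scheme the message describes has three phases: during codegen each deopting node only records a deopt point and jumps to a not-yet-bound entry label; after the main code body, one translation and one deopt exit are emitted per point; finally a side table maps each exit back to its bytecode offset and translation. A minimal, self-contained C++ sketch of this collect-then-emit pattern (all names are illustrative stand-ins, not V8's real API):

#include <cstdio>
#include <vector>

// Illustrative stand-ins for V8's deopt bookkeeping; not the real types.
struct DeoptPoint {
  int bytecode_offset;         // interpreter position to resume at
  int translation_index = -1;  // filled in when the translation is built
  int entry_pc = -1;           // filled in when the deopt exit is emitted
};

int main() {
  std::vector<DeoptPoint> deopts;

  // Codegen phase: each deopting node registers a deopt point and jumps
  // to its (not yet bound) entry label instead of emitting deopt code inline.
  deopts.push_back({/*bytecode_offset=*/12});
  deopts.push_back({/*bytecode_offset=*/34});

  // End of codegen: build one translation and one deopt exit per point.
  int next_translation = 0;
  int pc = 0x120;  // pretend code offset where the deopt section starts
  for (DeoptPoint& d : deopts) {
    d.translation_index = next_translation++;  // cf. BeginTranslation()
    d.entry_pc = pc++;  // cf. bind(entry_label) + CallForDeoptimization
  }

  // The side table (DeoptimizationData in V8) maps each exit back to
  // its bytecode offset and translation.
  for (const DeoptPoint& d : deopts) {
    std::printf("pc=%#x -> bytecode=%d, translation=%d\n", d.entry_pc,
                d.bytecode_offset, d.translation_index);
  }
  return 0;
}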
@@ -19,16 +19,31 @@ namespace v8 {
namespace internal {
namespace maglev {
class MaglevCodeGenState {
class InterpreterFrameState;
class DeoptimizationInfo {
public:
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) = 0;
Label deferred_code_label;
Label return_label;
};
DeoptimizationInfo(BytecodeOffset bytecode_position,
InterpreterFrameState* checkpoint_state)
: bytecode_position(bytecode_position),
checkpoint_state(checkpoint_state) {}
BytecodeOffset bytecode_position;
InterpreterFrameState* checkpoint_state;
Label entry_label;
int index = -1;
};
class DeferredCodeInfo {
public:
virtual void Generate(MaglevCodeGenState* code_gen_state,
Label* return_label) = 0;
Label deferred_code_label;
Label return_label;
};
class MaglevCodeGenState {
public:
MaglevCodeGenState(MaglevCompilationUnit* compilation_unit,
SafepointTableBuilder* safepoint_table_builder)
: compilation_unit_(compilation_unit),
@@ -40,13 +55,20 @@ class MaglevCodeGenState {
void PushDeferredCode(DeferredCodeInfo* deferred_code) {
deferred_code_.push_back(deferred_code);
}
void EmitDeferredCode() {
for (auto& deferred_code : deferred_code_) {
masm()->RecordComment("-- Deferred block");
masm()->bind(&deferred_code->deferred_code_label);
deferred_code->Generate(this, &deferred_code->return_label);
masm()->int3();
}
const std::vector<DeferredCodeInfo*>& deferred_code() const {
return deferred_code_;
}
void PushNonLazyDeopt(DeoptimizationInfo* info) {
non_lazy_deopts_.push_back(info);
}
void PushLazyDeopt(DeoptimizationInfo* info) {
lazy_deopts_.push_back(info);
}
const std::vector<DeoptimizationInfo*>& non_lazy_deopts() const {
return non_lazy_deopts_;
}
const std::vector<DeoptimizationInfo*>& lazy_deopts() const {
return lazy_deopts_;
}
compiler::NativeContextRef native_context() const {
@@ -86,6 +108,8 @@ class MaglevCodeGenState {
MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_;
std::vector<DeoptimizationInfo*> non_lazy_deopts_;
std::vector<DeoptimizationInfo*> lazy_deopts_;
int vreg_slots_ = 0;
// Allow marking some codegen paths as unsupported, so that we can test maglev
@@ -97,9 +121,24 @@ class MaglevCodeGenState {
// Some helpers for codegen.
// TODO(leszeks): consider moving this to a separate header.
inline constexpr int GetFramePointerOffsetForStackSlot(int index) {
return StandardFrameConstants::kExpressionsOffset -
index * kSystemPointerSize;
}
inline constexpr int GetFramePointerOffsetForStackSlot(
const compiler::AllocatedOperand& operand) {
return GetFramePointerOffsetForStackSlot(operand.index());
}
inline int GetSafepointIndexForStackSlot(int i) {
// Safepoint tables also contain slots for all fixed frame slots (both
// above and below the fp).
return StandardFrameConstants::kFixedSlotCount + i;
}
inline MemOperand GetStackSlot(int index) {
return MemOperand(rbp, StandardFrameConstants::kExpressionsOffset -
index * kSystemPointerSize);
return MemOperand(rbp, GetFramePointerOffsetForStackSlot(index));
}
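As a worked example of the helper arithmetic (the constant values below are assumptions matching the standard x64 frame layout, with the return address and saved fp above fp and the context and function below it; kSystemPointerSize == 8, kExpressionsOffset == -3 * kSystemPointerSize, kFixedSlotCount == 4):

// Illustrative values only, under the assumptions above:
//   GetFramePointerOffsetForStackSlot(0) == -24  // first spill slot, [fp - 24]
//   GetFramePointerOffsetForStackSlot(1) == -32  // second spill slot, [fp - 32]
//   GetSafepointIndexForStackSlot(0) == 4        // skips the 4 fixed frame slots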
inline MemOperand GetStackSlot(const compiler::AllocatedOperand& operand) {
@@ -122,12 +161,6 @@ inline MemOperand ToMemOperand(const ValueLocation& location) {
return ToMemOperand(location.operand());
}
inline int GetSafepointIndexForStackSlot(int i) {
// Safepoint tables also contain slots for all fixed frame slots (both
// above and below the fp).
return StandardFrameConstants::kFixedSlotCount + i;
}
} // namespace maglev
} // namespace internal
} // namespace v8
@@ -7,6 +7,8 @@
#include "src/codegen/code-desc.h"
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
@@ -15,10 +17,10 @@
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/objects/code-inl.h"
namespace v8 {
namespace internal {
namespace maglev {
#define __ masm()->
@@ -84,9 +86,7 @@ class MaglevCodeGeneratingNodeProcessor {
}
}
void PostProcessGraph(MaglevCompilationUnit*, Graph* graph) {
code_gen_state_->EmitDeferredCode();
}
void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}
void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
if (FLAG_code_comments) {
@@ -305,6 +305,16 @@ class MaglevCodeGeneratingNodeProcessor {
MaglevCodeGenState* code_gen_state_;
};
constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
return 1 - offset / kSystemPointerSize;
}
constexpr int DeoptStackSlotFromStackSlot(
const compiler::AllocatedOperand& operand) {
return DeoptStackSlotIndexFromFPOffset(
GetFramePointerOffsetForStackSlot(operand));
}
} // namespace
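Under the same assumed x64 layout, DeoptStackSlotIndexFromFPOffset maps fp-relative byte offsets onto the deoptimizer's slot numbering, which counts downwards from the return address:

// Illustrative values only, under the assumptions above:
//   DeoptStackSlotIndexFromFPOffset(8)   == 0  // return address, [fp + 8]
//   DeoptStackSlotIndexFromFPOffset(0)   == 1  // saved fp
//   DeoptStackSlotIndexFromFPOffset(-8)  == 2  // context  (kContextOffset)
//   DeoptStackSlotIndexFromFPOffset(-16) == 3  // function (kFunctionOffset)
//   DeoptStackSlotIndexFromFPOffset(-24) == 4  // first spill slot
// so DeoptStackSlotFromStackSlot(op) == kFixedSlotCount + op.index() under
// these assumptions.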
class MaglevCodeGeneratorImpl final {
@@ -315,8 +325,12 @@ class MaglevCodeGeneratorImpl final {
}
private:
static constexpr int kFunctionLiteralIndex = 0;
static constexpr int kOptimizedOutConstantIndex = 1;
MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
: safepoint_table_builder_(compilation_unit->zone()),
translation_array_builder_(compilation_unit->zone()),
code_gen_state_(compilation_unit, safepoint_table_builder()),
processor_(compilation_unit, &code_gen_state_),
graph_(graph) {}
@@ -328,7 +342,100 @@ class MaglevCodeGeneratorImpl final {
return BuildCodeObject();
}
void EmitCode() { processor_.ProcessGraph(graph_); }
void EmitCode() {
processor_.ProcessGraph(graph_);
EmitDeferredCode();
EmitDeopts();
}
void EmitDeferredCode() {
for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
__ RecordComment("-- Deferred block");
__ bind(&deferred_code->deferred_code_label);
deferred_code->Generate(&code_gen_state_, &deferred_code->return_label);
__ Trap();
}
}
void EmitDeopts() {
deopt_exit_start_offset_ = __ pc_offset();
__ RecordComment("-- Non-lazy deopts");
for (DeoptimizationInfo* deopt_info : code_gen_state_.non_lazy_deopts()) {
EmitDeopt(deopt_info);
__ bind(&deopt_info->entry_label);
// TODO(leszeks): Add soft deopt entry.
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, 0,
&deopt_info->entry_label, DeoptimizeKind::kEager,
nullptr, nullptr);
}
__ RecordComment("-- Lazy deopts");
for (DeoptimizationInfo* deopt_info : code_gen_state_.lazy_deopts()) {
EmitDeopt(deopt_info);
__ bind(&deopt_info->entry_label);
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
&deopt_info->entry_label, DeoptimizeKind::kLazy,
nullptr, nullptr);
}
}
void EmitDeopt(DeoptimizationInfo* deopt_info) {
int frame_count = 1;
int jsframe_count = 1;
int update_feedback_count = 0;
deopt_info->index = translation_array_builder_.BeginTranslation(
frame_count, jsframe_count, update_feedback_count);
// Returns are used for updating an accumulator or register after a lazy
// deopt.
int return_offset = 0;
int return_count = 0;
translation_array_builder_.BeginInterpretedFrame(
deopt_info->bytecode_position, kFunctionLiteralIndex,
code_gen_state_.register_count(), return_offset, return_count);
auto* liveness = code_gen_state_.bytecode_analysis().GetInLivenessFor(
deopt_info->bytecode_position.ToInt());
// Closure
int closure_index = DeoptStackSlotIndexFromFPOffset(
StandardFrameConstants::kFunctionOffset);
translation_array_builder_.StoreStackSlot(closure_index);
// Parameters
for (int i = 0; i < code_gen_state_.parameter_count(); ++i) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
translation_array_builder_.StoreStackSlot(DeoptStackSlotFromStackSlot(
deopt_info->checkpoint_state->get(reg)->spill_slot()));
}
// Context
int context_index =
DeoptStackSlotIndexFromFPOffset(StandardFrameConstants::kContextOffset);
translation_array_builder_.StoreStackSlot(context_index);
// Locals
for (int i = 0; i < code_gen_state_.register_count(); ++i) {
interpreter::Register reg(i);
if (liveness->RegisterIsLive(i)) {
translation_array_builder_.StoreStackSlot(DeoptStackSlotFromStackSlot(
deopt_info->checkpoint_state->get(reg)->spill_slot()));
} else {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
}
}
// Accumulator
if (liveness->AccumulatorIsLive()) {
translation_array_builder_.StoreStackSlot(DeoptStackSlotFromStackSlot(
deopt_info->checkpoint_state->accumulator()->spill_slot()));
} else {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
}
}
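For orientation, here is the entry sequence EmitDeopt would record for a hypothetical function with two parameters and three locals, of which r1 is dead at the deopt point (slot names are placeholders):

// BeginInterpretedFrame(bytecode_position, kFunctionLiteralIndex,
//                       /*register_count=*/3, /*return_offset=*/0,
//                       /*return_count=*/0)
// StoreStackSlot(closure_slot)              // closure
// StoreStackSlot(a0_slot)                   // parameter 0
// StoreStackSlot(a1_slot)                   // parameter 1
// StoreStackSlot(context_slot)              // context
// StoreStackSlot(r0_slot)                   // local r0 (live)
// StoreLiteral(kOptimizedOutConstantIndex)  // local r1 (dead)
// StoreStackSlot(r2_slot)                   // local r2 (live)
// StoreStackSlot(accumulator_slot)          // accumulator (live)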
void EmitMetadata() {
// Final alignment before starting on the metadata section.
@@ -345,9 +452,84 @@
kNoHandlerTableOffset);
return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
.set_stack_slots(stack_slot_count_with_fixed_frame())
.set_deoptimization_data(GenerateDeoptimizationData())
.TryBuild();
}
Handle<DeoptimizationData> GenerateDeoptimizationData() {
int non_lazy_deopt_count =
static_cast<int>(code_gen_state_.non_lazy_deopts().size());
int lazy_deopt_count =
static_cast<int>(code_gen_state_.lazy_deopts().size());
int deopt_count = lazy_deopt_count + non_lazy_deopt_count;
if (deopt_count == 0) {
return DeoptimizationData::Empty(isolate());
}
Handle<DeoptimizationData> data =
DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);
Handle<TranslationArray> translation_array =
translation_array_builder_.ToTranslationArray(isolate()->factory());
data->SetTranslationByteArray(*translation_array);
data->SetInlinedFunctionCount(Smi::zero());
// TODO(leszeks): Support optimization IDs
data->SetOptimizationId(Smi::zero());
DCHECK_NE(deopt_exit_start_offset_, -1);
data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
data->SetNonLazyDeoptCount(Smi::FromInt(non_lazy_deopt_count));
data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));
data->SetSharedFunctionInfo(
*code_gen_state_.compilation_unit()->shared_function_info().object());
// TODO(leszeks): Proper literals array.
Handle<DeoptimizationLiteralArray> literals =
isolate()->factory()->NewDeoptimizationLiteralArray(2);
literals->set(
kFunctionLiteralIndex,
*code_gen_state_.compilation_unit()->shared_function_info().object());
literals->set(kOptimizedOutConstantIndex,
ReadOnlyRoots(isolate()).optimized_out());
data->SetLiteralArray(*literals);
// TODO(leszeks): Fix once we have inlining.
Handle<PodArray<InliningPosition>> inlining_positions =
PodArray<InliningPosition>::New(isolate(), 0);
data->SetInliningPositions(*inlining_positions);
// TODO(leszeks): Fix once we have OSR.
BytecodeOffset osr_offset = BytecodeOffset::None();
data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
data->SetOsrPcOffset(Smi::FromInt(-1));
// Populate deoptimization entries.
int i = 0;
for (DeoptimizationInfo* deopt_info : code_gen_state_.non_lazy_deopts()) {
DCHECK_NE(deopt_info->index, -1);
data->SetBytecodeOffset(i, deopt_info->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(deopt_info->index));
data->SetPc(i, Smi::FromInt(deopt_info->entry_label.pos()));
#ifdef DEBUG
data->SetNodeId(i, Smi::FromInt(i));
#endif // DEBUG
i++;
}
for (DeoptimizationInfo* deopt_info : code_gen_state_.lazy_deopts()) {
DCHECK_NE(deopt_info->index, -1);
data->SetBytecodeOffset(i, deopt_info->bytecode_position);
data->SetTranslationIndex(i, Smi::FromInt(deopt_info->index));
data->SetPc(i, Smi::FromInt(deopt_info->entry_label.pos()));
#ifdef DEBUG
data->SetNodeId(i, Smi::FromInt(i));
#endif // DEBUG
i++;
}
return data;
}
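At runtime the deoptimizer consults this table: it matches the pc of the taken deopt exit against the entries, then follows that entry's translation index into the TranslationArray to rebuild the interpreter frame. With made-up values for the two eager deopt points from the sketch above, the table would read roughly:

// entry | bytecode offset | translation index | pc (entry_label.pos())
//   0   |       12        |        0          |  0x120
//   1   |       34        |       26          |  0x125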
int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
int stack_slot_count_with_fixed_frame() const {
return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
@@ -360,11 +542,17 @@
SafepointTableBuilder* safepoint_table_builder() {
return &safepoint_table_builder_;
}
TranslationArrayBuilder* translation_array_builder() {
return &translation_array_builder_;
}
SafepointTableBuilder safepoint_table_builder_;
TranslationArrayBuilder translation_array_builder_;
MaglevCodeGenState code_gen_state_;
GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
Graph* const graph_;
int deopt_exit_start_offset_ = -1;
};
// static
@@ -15,8 +15,8 @@ namespace maglev {
MaglevCompilationUnit::MaglevCompilationUnit(MaglevCompilationInfo* info,
Handle<JSFunction> function)
: info_(info),
bytecode_(
MakeRef(broker(), function->shared().GetBytecodeArray(isolate()))),
shared_function_info_(MakeRef(broker(), function->shared())),
bytecode_(shared_function_info_.GetBytecodeArray()),
feedback_(MakeRef(broker(), function->feedback_vector())),
bytecode_analysis_(bytecode_.object(), zone(), BytecodeOffset::None(),
true),
@@ -35,6 +35,9 @@ class MaglevCompilationUnit : public ZoneObject {
int parameter_count() const { return parameter_count_; }
bool has_graph_labeller() const;
MaglevGraphLabeller* graph_labeller() const;
const compiler::SharedFunctionInfoRef& shared_function_info() const {
return shared_function_info_;
}
const compiler::BytecodeArrayRef& bytecode() const { return bytecode_; }
const compiler::FeedbackVectorRef& feedback() const { return feedback_; }
const compiler::BytecodeAnalysis& bytecode_analysis() const {
@@ -43,6 +46,7 @@ class MaglevCompilationUnit : public ZoneObject {
private:
MaglevCompilationInfo* const info_;
const compiler::SharedFunctionInfoRef shared_function_info_;
const compiler::BytecodeArrayRef bytecode_;
const compiler::FeedbackVectorRef feedback_;
const compiler::BytecodeAnalysis bytecode_analysis_;
@@ -193,7 +193,7 @@ class MaglevGraphBuilder {
void AddCheckpoint() {
// TODO(v8:7700): Verify this calls the initializer list overload.
AddNewNode<Checkpoint>({}, iterator_.current_offset(),
AddNewNode<Checkpoint>({}, BytecodeOffset(iterator_.current_offset()),
GetInLiveness()->AccumulatorIsLive(),
GetAccumulator());
has_valid_checkpoint_ = true;
@@ -325,7 +325,7 @@ class GraphProcessor {
void ClearDeadCheckpointNodes() {
const compiler::BytecodeLivenessState* liveness =
bytecode_analysis().GetInLivenessFor(
checkpoint_state_->latest_checkpoint->bytecode_position());
checkpoint_state_->latest_checkpoint->bytecode_position().ToInt());
for (int i = 0; i < register_count(); ++i) {
if (!liveness->RegisterIsLive(i)) {
checkpoint_state_->checkpoint_frame_state.set(interpreter::Register(i),
@@ -12,9 +12,11 @@
#include "src/compiler/backend/instruction.h"
#include "src/ic/handler-configuration.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-vreg-allocator.h"
namespace v8 {
@@ -147,6 +149,10 @@ struct CopyForDeferredHelper<MaglevCompilationUnit*>
template <>
struct CopyForDeferredHelper<Register>
: public CopyForDeferredByValue<Register> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
: public CopyForDeferredByValue<BytecodeOffset> {};
// InterpreterFrameState is cloned.
template <>
@@ -196,7 +202,7 @@ struct StripFirstTwoTupleArgs<std::tuple<T1, T2, T...>> {
};
template <typename Function>
class DeferredCodeInfoImpl final : public MaglevCodeGenState::DeferredCodeInfo {
class DeferredCodeInfoImpl final : public DeferredCodeInfo {
public:
using FunctionPointer =
typename FunctionArgumentsTupleHelper<Function>::FunctionPointer;
@@ -252,64 +258,36 @@ void JumpToDeferredIf(Condition cond, MaglevCodeGenState* code_gen_state,
// Deopt
// ---
void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
int deopt_bytecode_position,
const InterpreterFrameState* checkpoint_state) {
DCHECK(node->properties().can_deopt());
// TODO(leszeks): Extract to separate call, or at the very least defer.
// TODO(leszeks): Stack check.
MaglevCompilationUnit* compilation_unit = code_gen_state->compilation_unit();
int maglev_frame_size = code_gen_state->vreg_slots();
ASM_CODE_COMMENT_STRING(code_gen_state->masm(), "Deoptimize");
__ RecordComment("Push registers and load accumulator");
int num_saved_slots = 0;
// TODO(verwaest): We probably shouldn't be spilling all values that go
// through deopt :)
for (int i = 0; i < compilation_unit->register_count(); ++i) {
ValueNode* node = checkpoint_state->get(interpreter::Register(i));
if (node == nullptr) continue;
__ Push(ToMemOperand(node->spill_slot()));
num_saved_slots++;
}
ValueNode* accumulator = checkpoint_state->accumulator();
if (accumulator) {
__ movq(kInterpreterAccumulatorRegister,
ToMemOperand(accumulator->spill_slot()));
}
DeoptimizationInfo* CreateEagerDeopt(
MaglevCodeGenState* code_gen_state, BytecodeOffset bytecode_position,
const InterpreterFrameState* checkpoint_state) {
Zone* zone = code_gen_state->compilation_unit()->zone();
DeoptimizationInfo* deopt_info = zone->New<DeoptimizationInfo>(
bytecode_position,
// TODO(leszeks): Right now we unconditionally copy the IFS. If we made
// checkpoint states already always be copies, we could remove this copy.
zone->New<InterpreterFrameState>(*code_gen_state->compilation_unit(),
*checkpoint_state));
__ RecordComment("Load registers from extra pushed slots");
int slot = 0;
for (int i = 0; i < compilation_unit->register_count(); ++i) {
ValueNode* node = checkpoint_state->get(interpreter::Register(i));
if (node == nullptr) continue;
__ movq(kScratchRegister, MemOperand(rsp, (num_saved_slots - slot++ - 1) *
kSystemPointerSize));
__ movq(MemOperand(rbp, InterpreterFrameConstants::kRegisterFileFromFp -
i * kSystemPointerSize),
kScratchRegister);
}
DCHECK_EQ(slot, num_saved_slots);
__ RecordComment("Materialize bytecode array and offset");
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp),
compilation_unit->bytecode().object());
__ Move(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
Smi::FromInt(deopt_bytecode_position +
(BytecodeArray::kHeaderSize - kHeapObjectTag)));
code_gen_state->PushNonLazyDeopt(deopt_info);
return deopt_info;
}
// Reset rsp to bytecode sized frame.
__ addq(rsp, Immediate((maglev_frame_size + num_saved_slots -
(2 + compilation_unit->register_count())) *
kSystemPointerSize));
__ TailCallBuiltin(Builtin::kBaselineOrInterpreterEnterAtBytecode);
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
BytecodeOffset bytecode_position,
const InterpreterFrameState* checkpoint_state) {
DeoptimizationInfo* deopt_info =
CreateEagerDeopt(code_gen_state, bytecode_position, checkpoint_state);
__ RecordComment("-- Jump to eager deopt");
__ j(cond, &deopt_info->entry_label);
}
void EmitDeopt(MaglevCodeGenState* code_gen_state, Node* node,
const ProcessingState& state) {
EmitDeopt(code_gen_state, node, state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state());
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
Node* node, const ProcessingState& state) {
DCHECK(node->properties().can_deopt());
EmitEagerDeoptIf(cond, code_gen_state,
state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state());
}
// ---
@@ -407,7 +385,8 @@ void SoftDeopt::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {}
void SoftDeopt::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
EmitDeopt(code_gen_state, this, state);
// TODO(leszeks): Make this a soft deopt.
EmitEagerDeoptIf(always, code_gen_state, this, state);
}
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
@@ -520,17 +499,18 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
JumpToDeferredIf(
not_equal, code_gen_state,
[](MaglevCodeGenState* code_gen_state, Label* return_label,
Register object, CheckMaps* node, int checkpoint_position,
Register object, CheckMaps* node, BytecodeOffset checkpoint_position,
const InterpreterFrameState* checkpoint_state_snapshot,
Register map_tmp) {
Label deopt;
DeoptimizationInfo* deopt = CreateEagerDeopt(
code_gen_state, checkpoint_position, checkpoint_state_snapshot);
// If the map is not deprecated, deopt straight away.
__ movl(kScratchRegister,
FieldOperand(map_tmp, Map::kBitField3Offset));
__ testl(kScratchRegister,
Immediate(Map::Bits3::IsDeprecatedBit::kMask));
__ j(zero, &deopt);
__ j(zero, &deopt->entry_label);
// Otherwise, try migrating the object. If the migration returns Smi
// zero, then it failed and we should deopt.
@@ -540,25 +520,19 @@ void CheckMaps::GenerateCode(MaglevCodeGenState* code_gen_state,
// TODO(verwaest): We're calling so we need to spill around it.
__ CallRuntime(Runtime::kTryMigrateInstance);
__ cmpl(kReturnRegister0, Immediate(0));
__ j(equal, &deopt);
__ j(equal, &deopt->entry_label);
// The migrated object is returned on success, retry the map check.
__ Move(object, kReturnRegister0);
__ LoadMap(map_tmp, object);
__ Cmp(map_tmp, node->map().object());
__ j(equal, return_label);
__ bind(&deopt);
EmitDeopt(code_gen_state, node, checkpoint_position,
checkpoint_state_snapshot);
__ jmp(&deopt->entry_label);
},
object, this, state.checkpoint()->bytecode_position(),
state.checkpoint_frame_state(), map_tmp);
} else {
Label is_ok;
__ j(equal, &is_ok);
EmitDeopt(code_gen_state, this, state);
__ bind(&is_ok);
EmitEagerDeoptIf(not_equal, code_gen_state, this, state);
}
}
void CheckMaps::PrintParams(std::ostream& os,
@@ -869,13 +869,13 @@ class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
using Base = FixedInputNodeT<0, Checkpoint>;
public:
explicit Checkpoint(size_t input_count, int bytecode_position,
explicit Checkpoint(size_t input_count, BytecodeOffset bytecode_position,
bool accumulator_is_live, ValueNode* accumulator)
: Base(input_count),
bytecode_position_(bytecode_position),
accumulator_(accumulator_is_live ? accumulator : nullptr) {}
int bytecode_position() const { return bytecode_position_; }
BytecodeOffset bytecode_position() const { return bytecode_position_; }
bool is_used() const { return IsUsedBit::decode(bit_field_); }
void SetUsed() { bit_field_ = IsUsedBit::update(bit_field_, true); }
ValueNode* accumulator() const { return accumulator_; }
@@ -887,7 +887,7 @@ class Checkpoint : public FixedInputNodeT<0, Checkpoint> {
private:
using IsUsedBit = NextBitField<bool, 1>;
const int bytecode_position_;
const BytecodeOffset bytecode_position_;
ValueNode* const accumulator_;
};
@@ -178,7 +178,6 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
DCHECK_EQ(0, args.length());
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
DCHECK(CodeKindCanDeoptimize(deoptimizer->compiled_code()->kind()));
DCHECK(deoptimizer->compiled_code()->is_turbofanned());
DCHECK(AllowGarbageCollection::IsAllowed());
DCHECK(isolate->context().is_null());