Commit 0df9606d authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Add lazy deopts

Nodes can now hold a LazyDeoptSafepoint which stores the frame state in
case they trigger a lazy deopt. OpProperties have a new CanLazyDeopt
bit, and codegen emits a safepoint table entry + lazy deopt for all
nodes with this bit. Also, we now check the deoptimized code bit on
entry into the maglev compiled function.

An example use of these lazy deopts is added as a PropertyCell fast path
for LdaGlobal, which adds a code dependency on the property cell.

Bug: v8:7700
Change-Id: I663db38dfa7325d38fc6d5f079d263a958074e36
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3557251
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79688}
parent d368dcf4
...@@ -3099,6 +3099,23 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { ...@@ -3099,6 +3099,23 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc)); leaq(dst, Operand(&current, -pc));
} }
// Check whether the current code object has been marked for deoptimization,
// and if so tail-call the CompileLazyDeoptimizedCode builtin. This works by:
// 1. loading the word holding that bit, found in the flags of the
//    {CodeDataContainer} referenced by the current code object;
// 2. testing kMarkedForDeoptimizationBit in those flags; and
// 3. jumping to the builtin when the bit is set.
void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
  const int container_offset =
      Code::kCodeDataContainerOffset - Code::kHeaderSize;
  LoadTaggedPointerField(
      scratch, Operand(kJavaScriptCallCodeStartRegister, container_offset));
  testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
        Immediate(1 << Code::kMarkedForDeoptimizationBit));
  Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
       RelocInfo::CODE_TARGET, not_zero);
}
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret, DeoptimizeKind kind, Label* ret,
Label*) { Label*) {
......
...@@ -412,6 +412,7 @@ class V8_EXPORT_PRIVATE TurboAssembler ...@@ -412,6 +412,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode, void Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
Condition cc = always); Condition cc = always);
void BailoutIfDeoptimized(Register scratch);
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret, DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label); Label* jump_deoptimization_entry_label);
......
...@@ -1159,22 +1159,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { ...@@ -1159,22 +1159,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ Assert(equal, AbortReason::kWrongFunctionCodeStart); __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
} }
// Check if the code object is marked for deoptimization. If it is, then it void CodeGenerator::BailoutIfDeoptimized() { __ BailoutIfDeoptimized(rbx); }
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
// 1. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 2. test kMarkedForDeoptimizationBit in those flags; and
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  // The deopt flag lives in the CodeDataContainer referenced by the current
  // code object; the offset is computed relative to the code start register
  // (hence the kHeaderSize subtraction, since that register points at the
  // code entry rather than the tagged Code object -- TODO confirm).
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ LoadTaggedPointerField(rbx,
                            Operand(kJavaScriptCallCodeStartRegister, offset));
  __ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
           Immediate(1 << Code::kMarkedForDeoptimizationBit));
  // If the marked-for-deoptimization bit is set, jump to the builtin that
  // discards this code and re-enters via lazy compilation.
  __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
          RelocInfo::CODE_TARGET, not_zero);
}
bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g, bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g,
Instruction* instr) { Instruction* instr) {
......
...@@ -124,6 +124,8 @@ std::string Register::ToString() const { ...@@ -124,6 +124,8 @@ std::string Register::ToString() const {
return std::string("<context>"); return std::string("<context>");
} else if (is_function_closure()) { } else if (is_function_closure()) {
return std::string("<closure>"); return std::string("<closure>");
} else if (*this == virtual_accumulator()) {
return std::string("<accumulator>");
} else if (is_parameter()) { } else if (is_parameter()) {
int parameter_index = ToParameterIndex(); int parameter_index = ToParameterIndex();
if (parameter_index == 0) { if (parameter_index == 0) {
......
...@@ -46,11 +46,13 @@ class MaglevCodeGenState { ...@@ -46,11 +46,13 @@ class MaglevCodeGenState {
return deferred_code_; return deferred_code_;
} }
void PushNonLazyDeopt(Checkpoint* info) { non_lazy_deopts_.push_back(info); } void PushNonLazyDeopt(Checkpoint* info) { non_lazy_deopts_.push_back(info); }
void PushLazyDeopt(Checkpoint* info) { non_lazy_deopts_.push_back(info); } void PushLazyDeopt(LazyDeoptSafepoint* info) { lazy_deopts_.push_back(info); }
const std::vector<Checkpoint*> non_lazy_deopts() const { const std::vector<Checkpoint*>& non_lazy_deopts() const {
return non_lazy_deopts_; return non_lazy_deopts_;
} }
const std::vector<Checkpoint*> lazy_deopts() const { return lazy_deopts_; } const std::vector<LazyDeoptSafepoint*>& lazy_deopts() const {
return lazy_deopts_;
}
compiler::NativeContextRef native_context() const { compiler::NativeContextRef native_context() const {
return broker()->target_native_context(); return broker()->target_native_context();
...@@ -90,7 +92,7 @@ class MaglevCodeGenState { ...@@ -90,7 +92,7 @@ class MaglevCodeGenState {
MacroAssembler masm_; MacroAssembler masm_;
std::vector<DeferredCodeInfo*> deferred_code_; std::vector<DeferredCodeInfo*> deferred_code_;
std::vector<Checkpoint*> non_lazy_deopts_; std::vector<Checkpoint*> non_lazy_deopts_;
std::vector<Checkpoint*> lazy_deopts_; std::vector<LazyDeoptSafepoint*> lazy_deopts_;
int vreg_slots_ = 0; int vreg_slots_ = 0;
// Allow marking some codegen paths as unsupported, so that we can test maglev // Allow marking some codegen paths as unsupported, so that we can test maglev
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "src/codegen/safepoint-table.h" #include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/translation-array.h" #include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h" #include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-code-gen-state.h" #include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h" #include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h" #include "src/maglev/maglev-graph-labeller.h"
...@@ -51,6 +52,8 @@ class MaglevCodeGeneratingNodeProcessor { ...@@ -51,6 +52,8 @@ class MaglevCodeGeneratingNodeProcessor {
__ int3(); __ int3();
} }
__ BailoutIfDeoptimized(rbx);
__ EnterFrame(StackFrame::BASELINE); __ EnterFrame(StackFrame::BASELINE);
// Save arguments in frame. // Save arguments in frame.
...@@ -359,7 +362,7 @@ class MaglevCodeGeneratorImpl final { ...@@ -359,7 +362,7 @@ class MaglevCodeGeneratorImpl final {
__ RecordComment("-- Non-lazy deopts"); __ RecordComment("-- Non-lazy deopts");
for (Checkpoint* checkpoint : code_gen_state_.non_lazy_deopts()) { for (Checkpoint* checkpoint : code_gen_state_.non_lazy_deopts()) {
EmitDeopt(checkpoint); EmitEagerDeopt(checkpoint);
__ bind(&checkpoint->deopt_entry_label); __ bind(&checkpoint->deopt_entry_label);
// TODO(leszeks): Add soft deopt entry. // TODO(leszeks): Add soft deopt entry.
...@@ -369,17 +372,24 @@ class MaglevCodeGeneratorImpl final { ...@@ -369,17 +372,24 @@ class MaglevCodeGeneratorImpl final {
} }
__ RecordComment("-- Lazy deopts"); __ RecordComment("-- Lazy deopts");
for (Checkpoint* deopt_info : code_gen_state_.lazy_deopts()) { int last_updated_safepoint = 0;
EmitDeopt(deopt_info); for (LazyDeoptSafepoint* deopt_info : code_gen_state_.lazy_deopts()) {
EmitLazyDeopt(deopt_info);
__ bind(&deopt_info->deopt_entry_label); __ bind(&deopt_info->deopt_entry_label);
__ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0, __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
&deopt_info->deopt_entry_label, &deopt_info->deopt_entry_label,
DeoptimizeKind::kLazy, nullptr, nullptr); DeoptimizeKind::kLazy, nullptr, nullptr);
last_updated_safepoint =
safepoint_table_builder_.UpdateDeoptimizationInfo(
deopt_info->deopting_call_return_pc,
deopt_info->deopt_entry_label.pos(), last_updated_safepoint,
deopt_info->deopt_index);
} }
} }
void EmitDeopt(Checkpoint* checkpoint) { void EmitEagerDeopt(Checkpoint* checkpoint) {
int frame_count = 1; int frame_count = 1;
int jsframe_count = 1; int jsframe_count = 1;
int update_feedback_count = 0; int update_feedback_count = 0;
...@@ -388,12 +398,59 @@ class MaglevCodeGeneratorImpl final { ...@@ -388,12 +398,59 @@ class MaglevCodeGeneratorImpl final {
// Returns are used for updating an accumulator or register after a lazy // Returns are used for updating an accumulator or register after a lazy
// deopt. // deopt.
int return_offset = 0; const int return_offset = 0;
int return_count = 0; const int return_count = 0;
translation_array_builder_.BeginInterpretedFrame( translation_array_builder_.BeginInterpretedFrame(
checkpoint->bytecode_position, kFunctionLiteralIndex, checkpoint->bytecode_position, kFunctionLiteralIndex,
code_gen_state_.register_count(), return_offset, return_count); code_gen_state_.register_count(), return_offset, return_count);
EmitDeoptFrameValues(*code_gen_state_.compilation_unit(), checkpoint->state,
interpreter::Register::invalid_value());
}
// Emits the deopt translation for a lazy deopt safepoint. Unlike an eager
// deopt, the translated frame must describe where the deopting call's
// return value is written back (safepoint->result_location), encoded as a
// "return offset" into the translated frame.
void EmitLazyDeopt(LazyDeoptSafepoint* safepoint) {
  // A single interpreted frame; no inlined frames.
  int frame_count = 1;
  int jsframe_count = 1;
  int update_feedback_count = 0;
  // Record the translation index on the safepoint so codegen can later wire
  // it into the safepoint table's deoptimization info.
  safepoint->deopt_index = translation_array_builder_.BeginTranslation(
      frame_count, jsframe_count, update_feedback_count);

  // Return offsets are counted from the end of the translation frame, which
  // is the array [parameters..., locals..., accumulator].
  int return_offset;
  if (safepoint->result_location ==
      interpreter::Register::virtual_accumulator()) {
    // The accumulator is the last entry, i.e. offset 0 from the end.
    return_offset = 0;
  } else if (safepoint->result_location.is_parameter()) {
    // This is slightly tricky to reason about because of zero indexing and
    // fence post errors. As an example, consider a frame with 2 locals and
    // 2 parameters, where we want argument index 1 -- looking at the array
    // in reverse order we have:
    //   [acc, r1, r0, a1, a0]
    //                  ^
    // and this calculation gives, correctly:
    //   2 + 2 - 1 = 3
    return_offset = code_gen_state_.register_count() +
                    code_gen_state_.parameter_count() -
                    safepoint->result_location.ToParameterIndex();
  } else {
    // A plain interpreter register: count backwards from the accumulator.
    return_offset =
        code_gen_state_.register_count() - safepoint->result_location.index();
  }
  // TODO(leszeks): Support lazy deopts with multiple return values.
  int return_count = 1;
  translation_array_builder_.BeginInterpretedFrame(
      safepoint->bytecode_position, kFunctionLiteralIndex,
      code_gen_state_.register_count(), return_offset, return_count);

  // The result location itself is skipped / marked optimized-out by
  // EmitDeoptFrameValues, since the deoptimizer fills it in.
  EmitDeoptFrameValues(*code_gen_state_.compilation_unit(), safepoint->state,
                       safepoint->result_location);
}
void EmitDeoptFrameValues(
const MaglevCompilationUnit& compilation_unit,
const CompactInterpreterFrameState* checkpoint_state,
interpreter::Register result_location) {
// Closure // Closure
int closure_index = DeoptStackSlotIndexFromFPOffset( int closure_index = DeoptStackSlotIndexFromFPOffset(
StandardFrameConstants::kFunctionOffset); StandardFrameConstants::kFunctionOffset);
...@@ -402,12 +459,16 @@ class MaglevCodeGeneratorImpl final { ...@@ -402,12 +459,16 @@ class MaglevCodeGeneratorImpl final {
// Parameters // Parameters
{ {
int i = 0; int i = 0;
checkpoint->state->ForEachParameter( checkpoint_state->ForEachParameter(
*code_gen_state_.compilation_unit(), compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_EQ(reg.ToParameterIndex(), i); DCHECK_EQ(reg.ToParameterIndex(), i);
translation_array_builder_.StoreStackSlot( if (reg != result_location) {
DeoptStackSlotFromStackSlot(value->spill_slot())); translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot()));
} else {
translation_array_builder_.StoreLiteral(
kOptimizedOutConstantIndex);
}
i++; i++;
}); });
} }
...@@ -420,10 +481,10 @@ class MaglevCodeGeneratorImpl final { ...@@ -420,10 +481,10 @@ class MaglevCodeGeneratorImpl final {
// Locals // Locals
{ {
int i = 0; int i = 0;
checkpoint->state->ForEachLocal( checkpoint_state->ForEachLocal(
*code_gen_state_.compilation_unit(), compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
[&](ValueNode* value, interpreter::Register reg) {
DCHECK_LE(i, reg.index()); DCHECK_LE(i, reg.index());
if (reg == result_location) return;
while (i < reg.index()) { while (i < reg.index()) {
translation_array_builder_.StoreLiteral( translation_array_builder_.StoreLiteral(
kOptimizedOutConstantIndex); kOptimizedOutConstantIndex);
...@@ -442,16 +503,13 @@ class MaglevCodeGeneratorImpl final { ...@@ -442,16 +503,13 @@ class MaglevCodeGeneratorImpl final {
// Accumulator // Accumulator
{ {
// TODO(leszeks): Bit ugly to use a did_emit boolean here rather than if (checkpoint_state->liveness()->AccumulatorIsLive() &&
// explicitly checking for accumulator liveness. result_location != interpreter::Register::virtual_accumulator()) {
bool did_emit = false; ValueNode* value = checkpoint_state->accumulator(compilation_unit);
checkpoint->state->ForAccumulator( translation_array_builder_.StoreStackSlot(
*code_gen_state_.compilation_unit(), [&](ValueNode* value) { DeoptStackSlotFromStackSlot(value->spill_slot()));
translation_array_builder_.StoreStackSlot(
DeoptStackSlotFromStackSlot(value->spill_slot())); } else {
did_emit = true;
});
if (!did_emit) {
translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex); translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
} }
} }
......
...@@ -66,7 +66,7 @@ class UseMarkingProcessor { ...@@ -66,7 +66,7 @@ class UseMarkingProcessor {
template <typename NodeT> template <typename NodeT>
void Process(NodeT* node, const ProcessingState& state) { void Process(NodeT* node, const ProcessingState& state) {
if constexpr (NodeT::kProperties.can_deopt()) { if constexpr (NodeT::kProperties.can_eager_deopt()) {
MarkCheckpointNodes(node, node->eager_deopt_info(), state); MarkCheckpointNodes(node, node->eager_deopt_info(), state);
} }
for (Input& input : *node) { for (Input& input : *node) {
...@@ -195,6 +195,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode( ...@@ -195,6 +195,7 @@ MaybeHandle<CodeT> MaglevCompiler::GenerateCode(
} }
Isolate* const isolate = toplevel_compilation_unit->isolate(); Isolate* const isolate = toplevel_compilation_unit->isolate();
isolate->native_context()->AddOptimizedCode(ToCodeT(*code));
return ToCodeT(code, isolate); return ToCodeT(code, isolate);
} }
......
This diff is collapsed.
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include "src/compiler/bytecode-liveness-map.h" #include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/heap-refs.h" #include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker.h" #include "src/compiler/js-heap-broker.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-compilation-info.h" #include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-graph-labeller.h" #include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph.h" #include "src/maglev/maglev-graph.h"
...@@ -133,10 +134,6 @@ class MaglevGraphBuilder { ...@@ -133,10 +134,6 @@ class MaglevGraphBuilder {
return node; return node;
} }
template <Operation kOperation, typename... Args>
ValueNode* AddNewOperationNode(std::initializer_list<ValueNode*> inputs,
Args&&... args);
template <typename NodeT, typename... Args> template <typename NodeT, typename... Args>
NodeT* AddNewNode(size_t input_count, Args&&... args) { NodeT* AddNewNode(size_t input_count, Args&&... args) {
return AddNode( return AddNode(
...@@ -150,7 +147,7 @@ class MaglevGraphBuilder { ...@@ -150,7 +147,7 @@ class MaglevGraphBuilder {
template <typename NodeT, typename... Args> template <typename NodeT, typename... Args>
NodeT* CreateNewNode(Args&&... args) { NodeT* CreateNewNode(Args&&... args) {
if constexpr (NodeT::kProperties.can_deopt()) { if constexpr (NodeT::kProperties.can_eager_deopt()) {
return Node::New<NodeT>(zone(), *compilation_unit_, GetCheckpoint(), return Node::New<NodeT>(zone(), *compilation_unit_, GetCheckpoint(),
std::forward<Args>(args)...); std::forward<Args>(args)...);
} else { } else {
...@@ -175,10 +172,40 @@ class MaglevGraphBuilder { ...@@ -175,10 +172,40 @@ class MaglevGraphBuilder {
operand_index, isolate()))); operand_index, isolate())));
} }
void SetAccumulator(ValueNode* node) { // For cases where we're setting the accumulator to a previously created node
// (e.g. moving an interpreter register to the accumulator).
// TODO(leszeks): Somehow DCHECK that this isn't a new node.
void SetAccumulatorToExistingNode(ValueNode* node) {
current_interpreter_frame_.set_accumulator(node); current_interpreter_frame_.set_accumulator(node);
} }
// Sets the accumulator to a freshly created node. If the node's static
// properties declare it can lazy-deopt, a lazy deopt safepoint targeting
// the accumulator is attached first.
template <typename NodeT>
void SetAccumulatorToNewNode(NodeT* node) {
  // The static and dynamic properties must agree, since we branch on the
  // static one below.
  DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(),
            node->properties().can_lazy_deopt());
  if constexpr (NodeT::kProperties.can_lazy_deopt()) {
    node->AttachLazyDeopt(
        GetLazyDeopt(interpreter::Register::virtual_accumulator()));
  }
  SetAccumulatorToExistingNode(node);
}
// Convenience overload: creates a new NodeT with the given inputs and
// constructor arguments, then sets the accumulator to it (attaching a lazy
// deopt safepoint if NodeT can lazy-deopt).
template <typename NodeT, typename... Args>
void SetAccumulatorToNewNode(std::initializer_list<ValueNode*> inputs,
                             Args&&... args) {
  // Forward rather than pass by name: `args` are forwarding references, so
  // `args...` would silently copy (and break for move-only arguments).
  // Matches the forwarding done in CreateNewNode.
  NodeT* node = AddNewNode<NodeT>(inputs, std::forward<Args>(args)...);
  SetAccumulatorToNewNode(node);
}
// Sets the accumulator to a constant node for the given broker reference:
// Smis get a SmiConstant node, everything else a generic Constant node.
void SetAccumulatorToConstant(const compiler::ObjectRef& ref) {
  if (ref.IsSmi()) {
    return SetAccumulatorToNewNode<SmiConstant>({},
                                                Smi::FromInt(ref.AsSmi()));
  }
  // TODO(leszeks): Detect roots and use RootConstant.
  SetAccumulatorToNewNode<Constant>({}, ref.AsHeapObject());
}
ValueNode* GetAccumulator() const { ValueNode* GetAccumulator() const {
return current_interpreter_frame_.accumulator(); return current_interpreter_frame_.accumulator();
} }
...@@ -212,6 +239,14 @@ class MaglevGraphBuilder { ...@@ -212,6 +239,14 @@ class MaglevGraphBuilder {
return latest_checkpoint_; return latest_checkpoint_;
} }
// Builds a zone-allocated lazy deopt safepoint capturing the current
// bytecode offset, a compact snapshot of the interpreter frame (filtered by
// the out-liveness of the current bytecode), and the register the deopting
// node's result should be written into.
LazyDeoptSafepoint* GetLazyDeopt(interpreter::Register result_location) {
  return zone()->New<LazyDeoptSafepoint>(
      BytecodeOffset(iterator_.current_offset()),
      zone()->New<CompactInterpreterFrameState>(
          *compilation_unit_, GetOutLiveness(), current_interpreter_frame_),
      result_location);
}
void MarkPossibleSideEffect() { void MarkPossibleSideEffect() {
// If there was a potential side effect, invalidate the previous checkpoint. // If there was a potential side effect, invalidate the previous checkpoint.
latest_checkpoint_ = nullptr; latest_checkpoint_ = nullptr;
...@@ -289,6 +324,8 @@ class MaglevGraphBuilder { ...@@ -289,6 +324,8 @@ class MaglevGraphBuilder {
void BuildCallFromRegisters(int argc_count, void BuildCallFromRegisters(int argc_count,
ConvertReceiverMode receiver_mode); ConvertReceiverMode receiver_mode);
void BuildPropertyCellAccess(const compiler::PropertyCellRef& property_cell);
template <Operation kOperation> template <Operation kOperation>
void BuildGenericUnaryOperationNode(); void BuildGenericUnaryOperationNode();
template <Operation kOperation> template <Operation kOperation>
......
This diff is collapsed.
...@@ -40,8 +40,8 @@ class MaglevPrintingVisitor { ...@@ -40,8 +40,8 @@ class MaglevPrintingVisitor {
private: private:
std::ostream& os_; std::ostream& os_;
std::unique_ptr<std::ostream> os_for_additional_info_; std::unique_ptr<std::ostream> os_for_additional_info_;
std::set<BasicBlock*> loop_headers; std::set<BasicBlock*> loop_headers_;
std::vector<BasicBlock*> targets; std::vector<BasicBlock*> targets_;
}; };
void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit, void PrintGraph(std::ostream& os, MaglevCompilationUnit* compilation_unit,
......
...@@ -132,20 +132,6 @@ class CompactInterpreterFrameState { ...@@ -132,20 +132,6 @@ class CompactInterpreterFrameState {
} }
} }
template <typename Function>
void ForAccumulator(const MaglevCompilationUnit& info, Function&& f) {
if (liveness_->AccumulatorIsLive()) {
f(live_registers_and_accumulator_[SizeFor(info, liveness_) - 1]);
}
}
template <typename Function>
void ForAccumulator(const MaglevCompilationUnit& info, Function&& f) const {
if (liveness_->AccumulatorIsLive()) {
f(live_registers_and_accumulator_[SizeFor(info, liveness_) - 1]);
}
}
template <typename Function> template <typename Function>
void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) { void ForEachRegister(const MaglevCompilationUnit& info, Function&& f) {
ForEachParameter(info, f); ForEachParameter(info, f);
...@@ -161,21 +147,28 @@ class CompactInterpreterFrameState { ...@@ -161,21 +147,28 @@ class CompactInterpreterFrameState {
template <typename Function> template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) { void ForEachValue(const MaglevCompilationUnit& info, Function&& f) {
ForEachRegister(info, f); ForEachRegister(info, f);
ForAccumulator(info, [&](ValueNode*& value) { if (liveness_->AccumulatorIsLive()) {
f(value, interpreter::Register::virtual_accumulator()); f(accumulator(info), interpreter::Register::virtual_accumulator());
}); }
} }
template <typename Function> template <typename Function>
void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const { void ForEachValue(const MaglevCompilationUnit& info, Function&& f) const {
ForEachRegister(info, f); ForEachRegister(info, f);
ForAccumulator(info, [&](ValueNode* value) { if (liveness_->AccumulatorIsLive()) {
f(value, interpreter::Register::virtual_accumulator()); f(accumulator(info), interpreter::Register::virtual_accumulator());
}); }
} }
const compiler::BytecodeLivenessState* liveness() const { return liveness_; } const compiler::BytecodeLivenessState* liveness() const { return liveness_; }
ValueNode*& accumulator(const MaglevCompilationUnit& info) {
return live_registers_and_accumulator_[size(info) - 1];
}
ValueNode* accumulator(const MaglevCompilationUnit& info) const {
return live_registers_and_accumulator_[size(info) - 1];
}
size_t size(const MaglevCompilationUnit& info) const { size_t size(const MaglevCompilationUnit& info) const {
return SizeFor(info, liveness_); return SizeFor(info, liveness_);
} }
......
...@@ -279,7 +279,7 @@ void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state, ...@@ -279,7 +279,7 @@ void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
template <typename NodeT> template <typename NodeT>
void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state, void EmitEagerDeoptIf(Condition cond, MaglevCodeGenState* code_gen_state,
NodeT* node) { NodeT* node) {
STATIC_ASSERT(NodeT::kProperties.can_deopt()); STATIC_ASSERT(NodeT::kProperties.can_eager_deopt());
EmitEagerDeoptIf(cond, code_gen_state, node->checkpoint()); EmitEagerDeoptIf(cond, code_gen_state, node->checkpoint());
} }
...@@ -386,7 +386,7 @@ void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state, ...@@ -386,7 +386,7 @@ void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
} }
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state, void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) { const ProcessingState& state) {
UNREACHABLE(); __ Move(ToRegister(result()), object_.object());
} }
void Constant::PrintParams(std::ostream& os, void Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const { MaglevGraphLabeller* graph_labeller) const {
...@@ -767,6 +767,16 @@ void Call::GenerateCode(MaglevCodeGenState* code_gen_state, ...@@ -767,6 +767,16 @@ void Call::GenerateCode(MaglevCodeGenState* code_gen_state,
__ CallBuiltin(Builtin::kCall_ReceiverIsAny); __ CallBuiltin(Builtin::kCall_ReceiverIsAny);
break; break;
} }
lazy_deopt()->deopting_call_return_pc = __ pc_offset_for_safepoint();
code_gen_state->PushLazyDeopt(lazy_deopt());
SafepointTableBuilder::Safepoint safepoint =
code_gen_state->safepoint_table_builder()->DefineSafepoint(
code_gen_state->masm());
for (int i = 0; i < code_gen_state->vreg_slots(); i++) {
safepoint.DefineTaggedStackSlot(GetSafepointIndexForStackSlot(i));
}
} }
// --- // ---
......
...@@ -170,7 +170,12 @@ static constexpr uint32_t kInvalidNodeId = 0; ...@@ -170,7 +170,12 @@ static constexpr uint32_t kInvalidNodeId = 0;
class OpProperties { class OpProperties {
public: public:
constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); } constexpr bool is_call() const { return kIsCallBit::decode(bitfield_); }
constexpr bool can_deopt() const { return kCanDeoptBit::decode(bitfield_); } constexpr bool can_eager_deopt() const {
return kCanEagerDeoptBit::decode(bitfield_);
}
constexpr bool can_lazy_deopt() const {
return kCanLazyDeoptBit::decode(bitfield_);
}
constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); } constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); }
constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); } constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); }
constexpr bool non_memory_side_effects() const { constexpr bool non_memory_side_effects() const {
...@@ -192,12 +197,11 @@ class OpProperties { ...@@ -192,12 +197,11 @@ class OpProperties {
static constexpr OpProperties Call() { static constexpr OpProperties Call() {
return OpProperties(kIsCallBit::encode(true)); return OpProperties(kIsCallBit::encode(true));
} }
static constexpr OpProperties JSCall() { static constexpr OpProperties EagerDeopt() {
return OpProperties(kIsCallBit::encode(true) | return OpProperties(kCanEagerDeoptBit::encode(true));
kNonMemorySideEffectsBit::encode(true));
} }
static constexpr OpProperties Deopt() { static constexpr OpProperties LazyDeopt() {
return OpProperties(kCanDeoptBit::encode(true)); return OpProperties(kCanLazyDeoptBit::encode(true));
} }
static constexpr OpProperties Reading() { static constexpr OpProperties Reading() {
return OpProperties(kCanReadBit::encode(true)); return OpProperties(kCanReadBit::encode(true));
...@@ -208,6 +212,9 @@ class OpProperties { ...@@ -208,6 +212,9 @@ class OpProperties {
static constexpr OpProperties NonMemorySideEffects() { static constexpr OpProperties NonMemorySideEffects() {
return OpProperties(kNonMemorySideEffectsBit::encode(true)); return OpProperties(kNonMemorySideEffectsBit::encode(true));
} }
static constexpr OpProperties JSCall() {
return Call() | NonMemorySideEffects() | LazyDeopt();
}
static constexpr OpProperties AnySideEffects() { static constexpr OpProperties AnySideEffects() {
return Reading() | Writing() | NonMemorySideEffects(); return Reading() | Writing() | NonMemorySideEffects();
} }
...@@ -217,8 +224,9 @@ class OpProperties { ...@@ -217,8 +224,9 @@ class OpProperties {
private: private:
using kIsCallBit = base::BitField<bool, 0, 1>; using kIsCallBit = base::BitField<bool, 0, 1>;
using kCanDeoptBit = kIsCallBit::Next<bool, 1>; using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>;
using kCanReadBit = kCanDeoptBit::Next<bool, 1>; using kCanLazyDeoptBit = kCanEagerDeoptBit::Next<bool, 1>;
using kCanReadBit = kCanLazyDeoptBit::Next<bool, 1>;
using kCanWriteBit = kCanReadBit::Next<bool, 1>; using kCanWriteBit = kCanReadBit::Next<bool, 1>;
using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>; using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>;
...@@ -293,14 +301,14 @@ class Input : public InputLocation { ...@@ -293,14 +301,14 @@ class Input : public InputLocation {
ValueNode* node_; ValueNode* node_;
}; };
class Checkpoint { class Checkpoint : public ZoneObject {
public: public:
Checkpoint(BytecodeOffset bytecode_position, Checkpoint(BytecodeOffset bytecode_position,
const CompactInterpreterFrameState* state) const CompactInterpreterFrameState* state)
: bytecode_position(bytecode_position), state(state) {} : bytecode_position(bytecode_position), state(state) {}
BytecodeOffset bytecode_position; const BytecodeOffset bytecode_position;
const CompactInterpreterFrameState* state; const CompactInterpreterFrameState* const state;
Label deopt_entry_label; Label deopt_entry_label;
int deopt_index = -1; int deopt_index = -1;
}; };
...@@ -314,6 +322,18 @@ class EagerDeoptInfo { ...@@ -314,6 +322,18 @@ class EagerDeoptInfo {
InputLocation* const input_locations; InputLocation* const input_locations;
}; };
// A Checkpoint specialized for lazy deopts: additionally records which
// interpreter register receives the deopting operation's result, and the
// return pc of the call that may trigger the deopt (filled in during code
// generation).
class LazyDeoptSafepoint : public Checkpoint {
 public:
  LazyDeoptSafepoint(BytecodeOffset bytecode_position,
                     const CompactInterpreterFrameState* state,
                     interpreter::Register result_location)
      : Checkpoint(bytecode_position, state),
        result_location(result_location) {}

  // Return-address offset of the deopting call; -1 until codegen records it.
  int deopting_call_return_pc = -1;
  // Interpreter register the lazily-deopted operation writes its result to.
  const interpreter::Register result_location;
};
// Dummy type for the initial raw allocation. // Dummy type for the initial raw allocation.
struct NodeWithInlineInputs {}; struct NodeWithInlineInputs {};
...@@ -467,7 +487,7 @@ class NodeBase : public ZoneObject { ...@@ -467,7 +487,7 @@ class NodeBase : public ZoneObject {
void Print(std::ostream& os, MaglevGraphLabeller*) const; void Print(std::ostream& os, MaglevGraphLabeller*) const;
const EagerDeoptInfo* eager_deopt_info() const { const EagerDeoptInfo* eager_deopt_info() const {
DCHECK(properties().can_deopt()); DCHECK(properties().can_eager_deopt());
return (reinterpret_cast<const EagerDeoptInfo*>( return (reinterpret_cast<const EagerDeoptInfo*>(
input_address(input_count() - 1)) - input_address(input_count() - 1)) -
1); 1);
...@@ -501,7 +521,7 @@ class NodeBase : public ZoneObject { ...@@ -501,7 +521,7 @@ class NodeBase : public ZoneObject {
} }
EagerDeoptInfo* eager_deopt_info_address() { EagerDeoptInfo* eager_deopt_info_address() {
DCHECK(properties().can_deopt()); DCHECK(properties().can_eager_deopt());
return reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) - return reinterpret_cast<EagerDeoptInfo*>(input_address(input_count() - 1)) -
1; 1;
} }
...@@ -511,7 +531,7 @@ class NodeBase : public ZoneObject { ...@@ -511,7 +531,7 @@ class NodeBase : public ZoneObject {
static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) { static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) {
const size_t size_before_node = const size_t size_before_node =
input_count * sizeof(Input) + input_count * sizeof(Input) +
(Derived::kProperties.can_deopt() ? sizeof(EagerDeoptInfo) : 0); (Derived::kProperties.can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0);
const size_t size = size_before_node + sizeof(Derived); const size_t size = size_before_node + sizeof(Derived);
intptr_t raw_buffer = intptr_t raw_buffer =
reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size)); reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size));
...@@ -784,6 +804,14 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { ...@@ -784,6 +804,14 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
static constexpr int kOperandIndex = 0; static constexpr int kOperandIndex = 0;
Input& operand_input() { return Node::input(kOperandIndex); } Input& operand_input() { return Node::input(kOperandIndex); }
compiler::FeedbackSource feedback() const { return feedback_; } compiler::FeedbackSource feedback() const { return feedback_; }
// Returns the attached lazy deopt safepoint; must only be called after
// AttachLazyDeopt.
LazyDeoptSafepoint* lazy_deopt() const {
  DCHECK_NOT_NULL(lazy_deopt_);
  return lazy_deopt_;
}
// Attaches the lazy deopt safepoint; may be set at most once.
void AttachLazyDeopt(LazyDeoptSafepoint* safepoint) {
  DCHECK_NULL(lazy_deopt_);
  lazy_deopt_ = safepoint;
}
protected: protected:
explicit UnaryWithFeedbackNode(uint32_t bitfield, explicit UnaryWithFeedbackNode(uint32_t bitfield,
...@@ -795,6 +823,7 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { ...@@ -795,6 +823,7 @@ class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
const compiler::FeedbackSource feedback_; const compiler::FeedbackSource feedback_;
LazyDeoptSafepoint* lazy_deopt_ = nullptr;
}; };
template <class Derived, Operation kOperation> template <class Derived, Operation kOperation>
...@@ -810,6 +839,14 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { ...@@ -810,6 +839,14 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
Input& left_input() { return Node::input(kLeftIndex); } Input& left_input() { return Node::input(kLeftIndex); }
Input& right_input() { return Node::input(kRightIndex); } Input& right_input() { return Node::input(kRightIndex); }
compiler::FeedbackSource feedback() const { return feedback_; } compiler::FeedbackSource feedback() const { return feedback_; }
LazyDeoptSafepoint* lazy_deopt() const {
DCHECK_NOT_NULL(lazy_deopt_);
return lazy_deopt_;
}
void AttachLazyDeopt(LazyDeoptSafepoint* safepoint) {
DCHECK_NULL(lazy_deopt_);
lazy_deopt_ = safepoint;
}
protected: protected:
BinaryWithFeedbackNode(uint32_t bitfield, BinaryWithFeedbackNode(uint32_t bitfield,
...@@ -821,6 +858,7 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { ...@@ -821,6 +858,7 @@ class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> {
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
const compiler::FeedbackSource feedback_; const compiler::FeedbackSource feedback_;
LazyDeoptSafepoint* lazy_deopt_ = nullptr;
}; };
#define DEF_OPERATION_NODE(Name, Super, OpName) \ #define DEF_OPERATION_NODE(Name, Super, OpName) \
...@@ -934,7 +972,7 @@ class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> { ...@@ -934,7 +972,7 @@ class SoftDeopt : public FixedInputNodeT<0, SoftDeopt> {
public: public:
explicit SoftDeopt(uint32_t bitfield) : Base(bitfield) {} explicit SoftDeopt(uint32_t bitfield) : Base(bitfield) {}
static constexpr OpProperties kProperties = OpProperties::Deopt(); static constexpr OpProperties kProperties = OpProperties::EagerDeopt();
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
...@@ -952,7 +990,7 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> { ...@@ -952,7 +990,7 @@ class CheckMaps : public FixedInputNodeT<1, CheckMaps> {
// mark that to generate stack maps. Mark as call so we at least clear the // mark that to generate stack maps. Mark as call so we at least clear the
// registers since we currently don't properly spill either. // registers since we currently don't properly spill either.
static constexpr OpProperties kProperties = static constexpr OpProperties kProperties =
OpProperties::Deopt() | OpProperties::Call(); OpProperties::EagerDeopt() | OpProperties::Call();
compiler::MapRef map() const { return map_; } compiler::MapRef map() const { return map_; }
...@@ -1025,6 +1063,14 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> { ...@@ -1025,6 +1063,14 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
Input& context() { return input(0); } Input& context() { return input(0); }
const compiler::NameRef& name() const { return name_; } const compiler::NameRef& name() const { return name_; }
LazyDeoptSafepoint* lazy_deopt() const {
DCHECK_NOT_NULL(lazy_deopt_);
return lazy_deopt_;
}
void AttachLazyDeopt(LazyDeoptSafepoint* safepoint) {
DCHECK_NULL(lazy_deopt_);
lazy_deopt_ = safepoint;
}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
...@@ -1032,6 +1078,7 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> { ...@@ -1032,6 +1078,7 @@ class LoadGlobal : public FixedInputValueNodeT<1, LoadGlobal> {
private: private:
const compiler::NameRef name_; const compiler::NameRef name_;
LazyDeoptSafepoint* lazy_deopt_ = nullptr;
}; };
class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> { class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
...@@ -1050,6 +1097,14 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> { ...@@ -1050,6 +1097,14 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
static constexpr int kObjectIndex = 1; static constexpr int kObjectIndex = 1;
Input& context() { return input(kContextIndex); } Input& context() { return input(kContextIndex); }
Input& object_input() { return input(kObjectIndex); } Input& object_input() { return input(kObjectIndex); }
LazyDeoptSafepoint* lazy_deopt() const {
DCHECK_NOT_NULL(lazy_deopt_);
return lazy_deopt_;
}
void AttachLazyDeopt(LazyDeoptSafepoint* safepoint) {
DCHECK_NULL(lazy_deopt_);
lazy_deopt_ = safepoint;
}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
...@@ -1057,6 +1112,7 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> { ...@@ -1057,6 +1112,7 @@ class LoadNamedGeneric : public FixedInputValueNodeT<2, LoadNamedGeneric> {
private: private:
const compiler::NameRef name_; const compiler::NameRef name_;
LazyDeoptSafepoint* lazy_deopt_ = nullptr;
}; };
class GapMove : public FixedInputNodeT<0, GapMove> { class GapMove : public FixedInputNodeT<0, GapMove> {
...@@ -1141,12 +1197,21 @@ class Call : public ValueNodeT<Call> { ...@@ -1141,12 +1197,21 @@ class Call : public ValueNodeT<Call> {
void set_arg(int i, ValueNode* node) { void set_arg(int i, ValueNode* node) {
set_input(i + kFixedInputCount, node); set_input(i + kFixedInputCount, node);
} }
LazyDeoptSafepoint* lazy_deopt() const {
DCHECK_NOT_NULL(lazy_deopt_);
return lazy_deopt_;
}
void AttachLazyDeopt(LazyDeoptSafepoint* safepoint) {
DCHECK_NULL(lazy_deopt_);
lazy_deopt_ = safepoint;
}
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&); void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&); void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
private: private:
LazyDeoptSafepoint* lazy_deopt_ = nullptr;
ConvertReceiverMode receiver_mode_; ConvertReceiverMode receiver_mode_;
}; };
......
...@@ -339,11 +339,13 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) { ...@@ -339,11 +339,13 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
for (Input& input : *node) AssignInput(input); for (Input& input : *node) AssignInput(input);
AssignTemporaries(node); AssignTemporaries(node);
for (Input& input : *node) UpdateUse(&input); for (Input& input : *node) UpdateUse(&input);
if (node->properties().can_deopt()) UpdateUse(*node->eager_deopt_info()); if (node->properties().can_eager_deopt()) {
UpdateUse(*node->eager_deopt_info());
}
if (node->properties().is_call()) SpillAndClearRegisters(); if (node->properties().is_call()) SpillAndClearRegisters();
// TODO(verwaest): This isn't a good idea :) // TODO(verwaest): This isn't a good idea :)
if (node->properties().can_deopt()) SpillRegisters(); if (node->properties().can_eager_deopt()) SpillRegisters();
// Allocate node output. // Allocate node output.
if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>()); if (node->Is<ValueNode>()) AllocateNodeResult(node->Cast<ValueNode>());
...@@ -500,7 +502,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node, ...@@ -500,7 +502,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
} }
// TODO(verwaest): This isn't a good idea :) // TODO(verwaest): This isn't a good idea :)
if (node->properties().can_deopt()) SpillRegisters(); if (node->properties().can_eager_deopt()) SpillRegisters();
// Merge register values. Values only flowing into phis and not being // Merge register values. Values only flowing into phis and not being
// independently live will be killed as part of the merge. // independently live will be killed as part of the merge.
......
...@@ -1087,6 +1087,7 @@ inline bool CodeDataContainer::is_interpreter_trampoline_builtin() const { ...@@ -1087,6 +1087,7 @@ inline bool CodeDataContainer::is_interpreter_trampoline_builtin() const {
return FromCodeT(*this).name(cage_base); \ return FromCodeT(*this).name(cage_base); \
} }
DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_maglevved, bool)
DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_turbofanned, bool) DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_turbofanned, bool)
DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_off_heap_trampoline, bool) DEF_PRIMITIVE_FORWARDING_CDC_GETTER(is_off_heap_trampoline, bool)
......
...@@ -513,7 +513,9 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate, ...@@ -513,7 +513,9 @@ void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
os << "compiler = " os << "compiler = "
<< (is_turbofanned() << (is_turbofanned()
? "turbofan" ? "turbofan"
: kind() == CodeKind::BASELINE ? "baseline" : "unknown") : is_maglevved()
? "turbofan"
: kind() == CodeKind::BASELINE ? "baseline" : "unknown")
<< "\n"; << "\n";
os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n"; os << "address = " << reinterpret_cast<void*>(ptr()) << "\n\n";
......
...@@ -137,6 +137,10 @@ class CodeDataContainer : public HeapObject { ...@@ -137,6 +137,10 @@ class CodeDataContainer : public HeapObject {
// Tells whether the outgoing parameters of this code are tagged pointers. // Tells whether the outgoing parameters of this code are tagged pointers.
inline bool has_tagged_outgoing_params() const; inline bool has_tagged_outgoing_params() const;
// [is_maglevved]: Tells whether the code object was generated by the
// Maglev optimizing compiler.
inline bool is_maglevved() const;
// [is_turbofanned]: Tells whether the code object was generated by the // [is_turbofanned]: Tells whether the code object was generated by the
// TurboFan optimizing compiler. // TurboFan optimizing compiler.
inline bool is_turbofanned() const; inline bool is_turbofanned() const;
......
...@@ -766,7 +766,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) { ...@@ -766,7 +766,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
} else { } else {
status |= static_cast<int>(OptimizationStatus::kOptimized); status |= static_cast<int>(OptimizationStatus::kOptimized);
} }
if (code.is_turbofanned()) { if (code.is_maglevved()) {
status |= static_cast<int>(OptimizationStatus::kMaglevved);
} else if (code.is_turbofanned()) {
status |= static_cast<int>(OptimizationStatus::kTurboFanned); status |= static_cast<int>(OptimizationStatus::kTurboFanned);
} }
} }
......
...@@ -895,18 +895,19 @@ enum class OptimizationStatus { ...@@ -895,18 +895,19 @@ enum class OptimizationStatus {
kAlwaysOptimize = 1 << 2, kAlwaysOptimize = 1 << 2,
kMaybeDeopted = 1 << 3, kMaybeDeopted = 1 << 3,
kOptimized = 1 << 4, kOptimized = 1 << 4,
kTurboFanned = 1 << 5, kMaglevved = 1 << 5,
kInterpreted = 1 << 6, kTurboFanned = 1 << 6,
kMarkedForOptimization = 1 << 7, kInterpreted = 1 << 7,
kMarkedForConcurrentOptimization = 1 << 8, kMarkedForOptimization = 1 << 8,
kOptimizingConcurrently = 1 << 9, kMarkedForConcurrentOptimization = 1 << 9,
kIsExecuting = 1 << 10, kOptimizingConcurrently = 1 << 10,
kTopmostFrameIsTurboFanned = 1 << 11, kIsExecuting = 1 << 11,
kLiteMode = 1 << 12, kTopmostFrameIsTurboFanned = 1 << 12,
kMarkedForDeoptimization = 1 << 13, kLiteMode = 1 << 13,
kBaseline = 1 << 14, kMarkedForDeoptimization = 1 << 14,
kTopmostFrameIsInterpreted = 1 << 15, kBaseline = 1 << 15,
kTopmostFrameIsBaseline = 1 << 16, kTopmostFrameIsInterpreted = 1 << 16,
kTopmostFrameIsBaseline = 1 << 17,
}; };
} // namespace internal } // namespace internal
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev --no-always-opt
var x = 1;
var do_change = false;
function g() {
if (do_change) {
x = 2;
return 40;
}
return 30;
}
function f() {
return g() + x;
}
%PrepareFunctionForOptimization(f);
assertEquals(31, f());
%OptimizeMaglevOnNextCall(f);
assertEquals(31, f());
assertTrue(isMaglevved(f));
// Trigger a lazy deopt on the next g() call.
do_change = true;
assertEquals(42, f());
assertFalse(isMaglevved(f));
assertUnoptimized(f);
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev --no-always-opt
var x = 1;
function f(o) {
return x;
}
%PrepareFunctionForOptimization(f);
assertEquals(1, f());
%OptimizeMaglevOnNextCall(f);
assertEquals(1, f());
assertTrue(isMaglevved(f));
// Trigger a lazy deopt now, so that f() deopts on its next call.
x = 2;
assertEquals(2, f());
assertFalse(isMaglevved(f));
assertUnoptimized(f);
...@@ -175,18 +175,19 @@ var V8OptimizationStatus = { ...@@ -175,18 +175,19 @@ var V8OptimizationStatus = {
kAlwaysOptimize: 1 << 2, kAlwaysOptimize: 1 << 2,
kMaybeDeopted: 1 << 3, kMaybeDeopted: 1 << 3,
kOptimized: 1 << 4, kOptimized: 1 << 4,
kTurboFanned: 1 << 5, kMaglevved: 1 << 5,
kInterpreted: 1 << 6, kTurboFanned: 1 << 6,
kMarkedForOptimization: 1 << 7, kInterpreted: 1 << 7,
kMarkedForConcurrentOptimization: 1 << 8, kMarkedForOptimization: 1 << 8,
kOptimizingConcurrently: 1 << 9, kMarkedForConcurrentOptimization: 1 << 9,
kIsExecuting: 1 << 10, kOptimizingConcurrently: 1 << 10,
kTopmostFrameIsTurboFanned: 1 << 11, kIsExecuting: 1 << 11,
kLiteMode: 1 << 12, kTopmostFrameIsTurboFanned: 1 << 12,
kMarkedForDeoptimization: 1 << 13, kLiteMode: 1 << 13,
kBaseline: 1 << 14, kMarkedForDeoptimization: 1 << 14,
kTopmostFrameIsInterpreted: 1 << 15, kBaseline: 1 << 15,
kTopmostFrameIsBaseline: 1 << 16, kTopmostFrameIsInterpreted: 1 << 16,
kTopmostFrameIsBaseline: 1 << 17,
}; };
// Returns true if --lite-mode is on and we can't ever turn on optimization. // Returns true if --lite-mode is on and we can't ever turn on optimization.
...@@ -210,6 +211,9 @@ var isUnoptimized; ...@@ -210,6 +211,9 @@ var isUnoptimized;
// Returns true if given function is optimized. // Returns true if given function is optimized.
var isOptimized; var isOptimized;
// Returns true if given function is compiled by Maglev.
var isMaglevved;
// Returns true if given function is compiled by TurboFan. // Returns true if given function is compiled by TurboFan.
var isTurboFanned; var isTurboFanned;
...@@ -781,6 +785,14 @@ var prettyPrinted; ...@@ -781,6 +785,14 @@ var prettyPrinted;
return (opt_status & V8OptimizationStatus.kOptimized) !== 0; return (opt_status & V8OptimizationStatus.kOptimized) !== 0;
} }
isMaglevved = function isMaglevved(fun) {
var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
"not a function");
return (opt_status & V8OptimizationStatus.kOptimized) !== 0 &&
(opt_status & V8OptimizationStatus.kMaglevved) !== 0;
}
isTurboFanned = function isTurboFanned(fun) { isTurboFanned = function isTurboFanned(fun) {
var opt_status = OptimizationStatus(fun, ""); var opt_status = OptimizationStatus(fun, "");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0, assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment