Commit ebc9f39f authored by Deepti Gandluri, committed by Commit Bot

Revert "Reland "[sparkplug][deoptimizer] Deoptimize to baseline.""

This reverts commit e3ccb538.

Reason for revert: Speculative revert for ARM64 CFI failures - https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20arm64%20-%20sim%20-%20CFI/5174?

Original change's description:
> Reland "[sparkplug][deoptimizer] Deoptimize to baseline."
>
> This is a reland of bdcd7d79
>
> Handle lazy deopts when the current bytecode is JumpLoop.
> Instead of advancing to the next bytecode, re-execute the JumpLoop.
>
> TBR=jgruber@chromium.org, neis@chromium.org
>
> Original change's description:
> > [sparkplug][deoptimizer] Deoptimize to baseline.
> >
> > If we have baseline code, deoptimize to baseline instead of the
> > interpreter. The process is similar to deopting to the interpreter.
> > We just use different builtins
> > (BaselineEnterAtBytecode/BaselineEnterAtNextBytecode) instead of
> > InterpreterEnterBytecodeDispatch/InterpreterEnterBytecodeAdvance, that
> > patch an interpreter frame to a baseline frame and continue execution in
> > baseline code (based on the deopt type, at the current or next
> > bytecode).
> >
> > Bug: v8:11420
> > Change-Id: Iabaefb36c05155a435c7b380906a86d9b9d549fa
> > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2695591
> > Commit-Queue: Patrick Thier <pthier@chromium.org>
> > Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> > Reviewed-by: Georg Neis <neis@chromium.org>
> > Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#73609}
>
> Bug: v8:11420
> Change-Id: Ib8cac028121188ddc23ff29377760ed684eb7392
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2783035
> Reviewed-by: Patrick Thier <pthier@chromium.org>
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Commit-Queue: Patrick Thier <pthier@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73636}

Bug: v8:11420
Change-Id: Icd797b4979a114a2a627e12c8bb7d2215df03182
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2785074
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73643}
parent 30bf932f
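
The core of the reverted mechanism is the deoptimizer's choice of continuation builtin. The condensed sketch below mirrors the DispatchBuiltinFor helper this revert removes from deoptimizer.cc further down; it uses only names that already appear in the diff and is not new code.

// Sketch only: which builtin the deoptimizer installs as the unoptimized
// frame's PC when materializing the output frame.
Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
  if (is_baseline) {
    // Baseline code cannot be jumped into directly (because of CFI); these
    // builtins first convert the materialized interpreter frame into a
    // baseline frame, then continue at the current or next bytecode.
    return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
                      : Builtins::kBaselineEnterAtBytecode;
  }
  return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
                    : Builtins::kInterpreterEnterBytecodeDispatch;
}

The JumpLoop handling called out in the relanded description lives in the BaselineEnterAtNextBytecode path: if the bytecode at the recorded offset is JumpLoop, the builtin falls back to the start-of-bytecode PC so the loop header is re-executed instead of being skipped.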
@@ -92,10 +92,6 @@ void BaselineAssembler::Bind(Label* label) {
   __ bind(label);
 }
 
-void BaselineAssembler::JumpTarget() {
-  // NOP on arm.
-}
-
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ b(target);
 }
...
@@ -87,8 +87,6 @@ void BaselineAssembler::Bind(Label* label) {
   __ BindJumpTarget(label);
 }
 
-void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
-
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ B(target);
 }
...
@@ -40,9 +40,6 @@ class BaselineAssembler {
   inline void DebugBreak();
 
   inline void Bind(Label* label);
-  // Marks the current position as a valid jump target on CFI enabled
-  // architectures.
-  inline void JumpTarget();
   inline void JumpIf(Condition cc, Label* target,
                      Label::Distance distance = Label::kFar);
   inline void Jump(Label* target, Label::Distance distance = Label::kFar);
...
@@ -426,7 +426,6 @@ void BaselineCompiler::PreVisitSingleBytecode() {
 void BaselineCompiler::VisitSingleBytecode() {
   int offset = iterator().current_offset();
-  bool is_marked_as_jump_target = false;
   if (labels_[offset]) {
     // Bind labels for this offset that have already been linked to a
     // jump (i.e. forward jumps, excluding jump tables).
@@ -437,23 +436,15 @@ void BaselineCompiler::VisitSingleBytecode() {
     labels_[offset]->linked.Clear();
 #endif
     __ Bind(&labels_[offset]->unlinked);
-    is_marked_as_jump_target = true;
   }
 
   // Record positions of exception handlers.
   if (iterator().current_offset() == *next_handler_offset_) {
     __ ExceptionHandler();
     next_handler_offset_++;
-    is_marked_as_jump_target = true;
   }
   DCHECK_LT(iterator().current_offset(), *next_handler_offset_);
-  // Mark position as valid jump target, if it isn't one already.
-  // This is required for the deoptimizer, when CFI is enabled.
-  if (!is_marked_as_jump_target) {
-    __ JumpTarget();
-  }
 
   if (FLAG_code_comments) {
     std::ostringstream str;
     str << "[ ";
...
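
The is_marked_as_jump_target / JumpTarget() bookkeeping removed above is what made every deopt re-entry point inside baseline code a legal landing pad when hardware control-flow integrity is enabled; the arm64 CFI simulator bot cited in the revert reason exercises exactly that configuration. As a rough idea only, the macro-assembler helper behind the arm64 BaselineAssembler::JumpTarget() plausibly looks like the sketch below; the exact instruction helper and enum names here are assumptions for illustration, not code from this CL.

// Assumed shape of the arm64 macro-assembler helper that backs
// BaselineAssembler::JumpTarget(); names are assumptions.
void TurboAssembler::JumpTarget() {
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  // Emit a BTI landing pad so that indirect branches may target this position.
  bti(BranchTargetIdentifier::kBtiJump);
#endif
  // With CFI disabled this emits nothing, matching the "NOP on ..." comments
  // in the per-architecture implementations.
}

Without such a marker at the re-entry bytecode, the indirect jump performed by the baseline-entry builtins would trap under branch-target enforcement, which is why the removed comment calls the marking "required for the deoptimizer, when CFI is enabled".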
@@ -90,10 +90,6 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
 void BaselineAssembler::Bind(Label* label) { __ bind(label); }
 
-void BaselineAssembler::JumpTarget() {
-  // NOP on ia32.
-}
-
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ jmp(target, distance);
 }
...
@@ -93,10 +93,6 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
 void BaselineAssembler::Bind(Label* label) { __ bind(label); }
 
-void BaselineAssembler::JumpTarget() {
-  // NOP on x64.
-}
-
 void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
   __ jmp(target, distance);
 }
...
@@ -3462,125 +3462,6 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
 }
 
-namespace {
-
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ ldr(kInterpreterBytecodeArrayRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ ldr(kInterpreterBytecodeOffsetRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
-  // Get function from the frame.
-  Register closure = r1;
-  __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-
-  // Replace BytecodeOffset with the feedback vector.
-  Register feedback_vector = r2;
-  __ ldr(feedback_vector,
-         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    Register scratch = r3;
-    __ CompareObjectType(feedback_vector, scratch, scratch,
-                         FEEDBACK_VECTOR_TYPE);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
-  }
-  __ str(feedback_vector,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  feedback_vector = no_reg;
-
-  // Get the Code object from the shared function info.
-  Register code_obj = r4;
-  __ ldr(code_obj,
-         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(code_obj,
-         FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
-  __ ldr(code_obj,
-         FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
-
-  // Compute baseline pc for bytecode offset.
-  __ Push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
-  Register get_baseline_pc = r3;
-  __ Move(get_baseline_pc, get_baseline_pc_extref);
-
-  // If the code deoptimizes during the implicit function entry stack interrupt
-  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
-  // not a valid bytecode offset.
-  // TODO(pthier): Investigate if it is feasible to handle this special case
-  // in TurboFan instead of here.
-  Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ b(eq, &function_entry_bytecode);
-
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = r1;
-    __ ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ b(ne, &not_jump_loop);
-    __ Move(get_baseline_pc,
-            ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-  }
-
-  __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(3, 0, r0);
-    Register arg_reg_1 = r0;
-    Register arg_reg_2 = r1;
-    Register arg_reg_3 = r2;
-    __ mov(arg_reg_1, code_obj);
-    __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
-    __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    __ CallCFunction(get_baseline_pc, 3, 0);
-  }
-  __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ add(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister);
-
-  __ Jump(code_obj);
-  __ Trap();  // Unreachable.
-
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ Move(get_baseline_pc,
-          ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ b(&valid_bytecode_offset);
-}
-
-}  // namespace
-
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false);
-}
-
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, true);
-}
-
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
...
@@ -3969,127 +3969,6 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
 }
 
-namespace {
-
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ Ldr(kInterpreterBytecodeArrayRegister,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
-              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-
-  // Get function from the frame.
-  Register closure = x1;
-  __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-
-  // Replace BytecodeOffset with the feedback vector.
-  Register feedback_vector = x2;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    Register scratch = x3;
-    __ CompareObjectType(feedback_vector, scratch, scratch,
-                         FEEDBACK_VECTOR_TYPE);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
-  }
-  __ Str(feedback_vector,
-         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  feedback_vector = no_reg;
-
-  // Get the Code object from the shared function info.
-  Register code_obj = x22;
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
-  __ LoadTaggedPointerField(
-      code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
-
-  // Compute baseline pc for bytecode offset.
-  __ Push(padreg, kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
-  Register get_baseline_pc = x3;
-  __ Mov(get_baseline_pc, get_baseline_pc_extref);
-
-  // If the code deoptimizes during the implicit function entry stack interrupt
-  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
-  // not a valid bytecode offset.
-  // TODO(pthier): Investigate if it is feasible to handle this special case
-  // in TurboFan instead of here.
-  Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ B(eq, &function_entry_bytecode);
-
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = x1;
-    __ Ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ Cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ B(ne, &not_jump_loop);
-    __ Mov(get_baseline_pc,
-           ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-  }
-
-  __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
-         (BytecodeArray::kHeaderSize - kHeapObjectTag));
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    Register arg_reg_1 = x0;
-    Register arg_reg_2 = x1;
-    Register arg_reg_3 = x2;
-    __ Mov(arg_reg_1, code_obj);
-    __ Mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
-    __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    __ CallCFunction(get_baseline_pc, 3, 0);
-  }
-  __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
-  __ Add(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister, padreg);
-
-  __ Jump(code_obj);
-  __ Trap();  // Unreachable.
-
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ Mov(kInterpreterBytecodeOffsetRegister,
-         BytecodeArray::kHeaderSize - kHeapObjectTag);
-  __ Mov(get_baseline_pc,
-         ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ B(&valid_bytecode_offset);
-}
-
-}  // namespace
-
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false);
-}
-
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, true);
-}
-
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
...
@@ -141,8 +141,6 @@ namespace internal {
   ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue)  \
   ASM(BaselineOnStackReplacement, ContextOnly)               \
   ASM(BaselineLeaveFrame, BaselineLeaveFrame)                \
-  ASM(BaselineEnterAtBytecode, Void)                         \
-  ASM(BaselineEnterAtNextBytecode, Void)                     \
                                                              \
   /* Code life-cycle */                                      \
   TFC(CompileLazy, JSTrampoline)                             \
...
@@ -4073,128 +4073,6 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
 }
 
-namespace {
-
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ mov(kInterpreterBytecodeArrayRegister,
-         MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
-  // Get function from the frame.
-  Register closure = esi;
-  __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
-
-  // Replace BytecodeOffset with the feedback vector.
-  Register feedback_vector = ecx;
-  __ mov(feedback_vector,
-         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    __ push(kInterpreterAccumulatorRegister);
-    Register scratch = kInterpreterAccumulatorRegister;
-    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
-    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
-    scratch = no_reg;
-    __ pop(kInterpreterAccumulatorRegister);
-  }
-  __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
-         feedback_vector);
-  feedback_vector = no_reg;
-
-  // Get the Code object from the shared function info.
-  Register code_obj = closure;
-  __ mov(code_obj,
-         FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  closure = no_reg;
-  __ mov(code_obj,
-         FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
-  __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
-  // Compute baseline pc for bytecode offset.
-  __ push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
-  Register get_baseline_pc = ecx;
-  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
-
-  // If the code deoptimizes during the implicit function entry stack interrupt
-  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
-  // not a valid bytecode offset.
-  // TODO(pthier): Investigate if it is feasible to handle this special case
-  // in TurboFan instead of here.
-  Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                   kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    __ push(kInterpreterBytecodeOffsetRegister);
-    Register bytecode = kInterpreterBytecodeOffsetRegister;
-    __ movzx_b(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-    __ pop(kInterpreterBytecodeOffsetRegister);
-  }
-
-  __ sub(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(3, eax);
-    __ mov(Operand(esp, 0 * kSystemPointerSize), code_obj);
-    __ mov(Operand(esp, 1 * kSystemPointerSize),
-           kInterpreterBytecodeOffsetRegister);
-    __ mov(Operand(esp, 2 * kSystemPointerSize),
-           kInterpreterBytecodeArrayRegister);
-    __ CallCFunction(get_baseline_pc, 3);
-  }
-  __ lea(code_obj,
-         FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
-  __ pop(kInterpreterAccumulatorRegister);
-
-  __ jmp(code_obj);
-  __ Trap();  // Unreachable.
-
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
-}
-
-}  // namespace
-
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false);
-}
-
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, true);
-}
-
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
...
@@ -4343,123 +4343,6 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
 }
 
-namespace {
-
-// Converts an interpreter frame into a baseline frame and continues execution
-// in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ movq(kInterpreterBytecodeArrayRegister,
-          MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ SmiUntag(
-      kInterpreterBytecodeOffsetRegister,
-      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-
-  // Get function from the frame.
-  Register closure = rdi;
-  __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
-
-  // Replace BytecodeOffset with the feedback vector.
-  Register feedback_vector = rbx;
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
-                            FieldOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
-    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
-  }
-  __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
-          feedback_vector);
-  feedback_vector = no_reg;
-
-  // Get the Code object from the shared function info.
-  Register code_obj = rbx;
-  __ LoadTaggedPointerField(
-      code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj,
-      FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
-  __ LoadTaggedPointerField(
-      code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
-
-  // Compute baseline pc for bytecode offset.
-  __ pushq(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
-  Register get_baseline_pc = rax;
-  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
-
-  // If the code deoptimizes during the implicit function entry stack interrupt
-  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
-  // not a valid bytecode offset.
-  // TODO(pthier): Investigate if it is feasible to handle this special case
-  // in TurboFan instead of here.
-  Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmpq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                    kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-
-  __ bind(&valid_bytecode_offset);
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = rdi;
-    __ movzxbq(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-  }
-
-  __ subq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ PrepareCallCFunction(3);
-    __ movq(arg_reg_1, code_obj);
-    __ movq(arg_reg_2, kInterpreterBytecodeOffsetRegister);
-    __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    __ CallCFunction(get_baseline_pc, 3);
-  }
-  __ leaq(code_obj,
-          FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
-  __ popq(kInterpreterAccumulatorRegister);
-
-  __ jmp(code_obj);
-  __ Trap();  // Unreachable.
-
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ movq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
-}
-
-}  // namespace
-
-void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, false);
-}
-
-void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
-  Generate_BaselineEntry(masm, true);
-}
-
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
...
@@ -621,35 +621,6 @@ ExternalReference::address_of_enable_experimental_regexp_engine() {
   return ExternalReference(&FLAG_enable_experimental_regexp_engine);
 }
 
-namespace {
-
-static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
-                                                  int bytecode_offset,
-                                                  Address raw_bytecode_array) {
-  Code code_obj = Code::cast(Object(raw_code_obj));
-  BytecodeArray bytecode_array =
-      BytecodeArray::cast(Object(raw_bytecode_array));
-  return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
-                                                      bytecode_array);
-}
-
-static uintptr_t BaselineEndPCForBytecodeOffset(Address raw_code_obj,
-                                                int bytecode_offset,
-                                                Address raw_bytecode_array) {
-  Code code_obj = Code::cast(Object(raw_code_obj));
-  BytecodeArray bytecode_array =
-      BytecodeArray::cast(Object(raw_bytecode_array));
-  return code_obj.GetBaselineEndPCForBytecodeOffset(bytecode_offset,
-                                                    bytecode_array);
-}
-
-}  // namespace
-
-FUNCTION_REFERENCE(baseline_end_pc_for_bytecode_offset,
-                   BaselineEndPCForBytecodeOffset)
-FUNCTION_REFERENCE(baseline_start_pc_for_bytecode_offset,
-                   BaselineStartPCForBytecodeOffset)
-
 ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
     Isolate* isolate) {
   return ExternalReference(isolate->thread_in_wasm_flag_address_address());
...
@@ -124,8 +124,6 @@ class StatsCounter;
   V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33")                 \
   V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55")                 \
   V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001")              \
-  V(baseline_start_pc_for_bytecode_offset, "BaselineStartPCForBytecodeOffset") \
-  V(baseline_end_pc_for_bytecode_offset, "BaselineEndPCForBytecodeOffset")     \
   V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address")     \
   V(check_object_type, "check_object_type")                                    \
   V(compute_integer_hash, "ComputeSeededHash")                                 \
...
@@ -14,8 +14,6 @@ void Builtins_ContinueToJavaScriptBuiltinWithResult();
 void Builtins_ContinueToJavaScriptBuiltin();
 void construct_stub_create_deopt_addr();
 void construct_stub_invoke_deopt_addr();
-void Builtins_BaselineEnterAtBytecode();
-void Builtins_BaselineEnterAtNextBytecode();
 typedef void (*function_ptr)();
 }
@@ -32,8 +30,6 @@ constexpr function_ptr builtins[] = {
     &Builtins_ContinueToJavaScriptBuiltin,
     &construct_stub_create_deopt_addr,
     &construct_stub_invoke_deopt_addr,
-    &Builtins_BaselineEnterAtBytecode,
-    &Builtins_BaselineEnterAtNextBytecode,
 };
 
 bool Deoptimizer::IsValidReturnAddress(Address address) {
...
@@ -991,25 +991,11 @@ void Deoptimizer::DoComputeOutputFrames() {
         stack_guard->real_jslimit() - kStackLimitSlackForDeoptimizationInBytes);
   }
 
-namespace {
-
-// Get the dispatch builtin for unoptimized frames.
-Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
-  if (is_baseline) {
-    return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
-                      : Builtins::kBaselineEnterAtBytecode;
-  } else {
-    return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
-                      : Builtins::kInterpreterEnterBytecodeDispatch;
-  }
-}
-
-}  // namespace
-
 void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
                                             int frame_index,
                                             bool goto_catch_handler) {
   SharedFunctionInfo shared = translated_frame->raw_shared_info();
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
   const bool is_bottommost = (0 == frame_index);
   const bool is_topmost = (output_count_ - 1 == frame_index);
@@ -1034,10 +1020,15 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
   const uint32_t output_frame_size = frame_info.frame_size_in_bytes();
 
   TranslatedFrame::iterator function_iterator = value_iterator++;
-  BytecodeArray bytecode_array =
-      shared.HasBreakInfo() ? shared.GetDebugInfo().DebugBytecodeArray()
-                            : shared.GetBytecodeArray(isolate());
+
+  if (verbose_tracing_enabled()) {
+    PrintF(trace_scope()->file(), "  translating unoptimized frame ");
+    std::unique_ptr<char[]> name = shared.DebugNameCStr();
+    PrintF(trace_scope()->file(), "%s", name.get());
+    PrintF(trace_scope()->file(),
+           " => bytecode_offset=%d, variable_frame_size=%d, frame_size=%d%s\n",
+           real_bytecode_offset, frame_info.frame_size_in_bytes_without_fixed(),
+           output_frame_size, goto_catch_handler ? " (throw)" : "");
+  }
 
   // Allocate and store the output frame description.
   FrameDescription* output_frame = new (output_frame_size)
@@ -1048,34 +1039,6 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
 
-  // Compute this frame's PC and state.
-  // For interpreted frames, the PC will be a special builtin that
-  // continues the bytecode dispatch. Note that non-topmost and lazy-style
-  // bailout handlers also advance the bytecode offset before dispatch, hence
-  // simulating what normal handlers do upon completion of the operation.
-  // For baseline frames, the PC will be a builtin to convert the interpreter
-  // frame to a baseline frame before continuing execution of baseline code.
-  // We can't directly continue into baseline code, because of CFI.
-  Builtins* builtins = isolate_->builtins();
-  const bool advance_bc =
-      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
-      !goto_catch_handler;
-  const bool is_baseline = shared.HasBaselineData();
-  Code dispatch_builtin =
-      builtins->builtin(DispatchBuiltinFor(is_baseline, advance_bc));
-
-  if (verbose_tracing_enabled()) {
-    PrintF(trace_scope()->file(), "  translating %s frame ",
-           is_baseline ? "baseline" : "interpreted");
-    std::unique_ptr<char[]> name = shared.DebugNameCStr();
-    PrintF(trace_scope()->file(), "%s", name.get());
-    PrintF(trace_scope()->file(), " => bytecode_offset=%d, ",
-           real_bytecode_offset);
-    PrintF(trace_scope()->file(), "variable_frame_size=%d, frame_size=%d%s\n",
-           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size,
-           goto_catch_handler ? " (throw)" : "");
-  }
-
   // The top address of the frame is computed from the previous frame's top and
   // this frame's size.
   const intptr_t top_address =
@@ -1182,6 +1145,9 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
   frame_writer.PushRawValue(argc, "actual argument count\n");
 
   // Set the bytecode array pointer.
+  Object bytecode_array = shared.HasBreakInfo()
+                              ? shared.GetDebugInfo().DebugBytecodeArray()
+                              : shared.GetBytecodeArray(isolate());
   frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
 
   // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
@@ -1271,16 +1237,26 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
   CHECK_EQ(translated_frame->end(), value_iterator);
   CHECK_EQ(0u, frame_writer.top_offset());
 
-  const intptr_t pc =
-      static_cast<intptr_t>(dispatch_builtin.InstructionStart());
+  // Compute this frame's PC and state. The PC will be a special builtin that
+  // continues the bytecode dispatch. Note that non-topmost and lazy-style
+  // bailout handlers also advance the bytecode offset before dispatch, hence
+  // simulating what normal handlers do upon completion of the operation.
+  Builtins* builtins = isolate_->builtins();
+  Code dispatch_builtin =
+      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
+              !goto_catch_handler
+          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
+          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
   if (is_topmost) {
     // Only the pc of the topmost frame needs to be signed since it is
     // authenticated at the end of the DeoptimizationEntry builtin.
     const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
-        pc, frame_writer.frame()->GetTop());
+        static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
+        frame_writer.frame()->GetTop());
     output_frame->SetPc(top_most_pc);
   } else {
-    output_frame->SetPc(pc);
+    output_frame->SetPc(
+        static_cast<intptr_t>(dispatch_builtin.InstructionStart()));
   }
 
   // Update constant pool.
...
@@ -225,13 +225,16 @@ namespace {
 bool IsInterpreterFramePc(Isolate* isolate, Address pc,
                           StackFrame::State* state) {
-  Builtins::Name builtin_index = InstructionStream::TryLookupCode(isolate, pc);
-  if (builtin_index != Builtins::kNoBuiltinId &&
-      (builtin_index == Builtins::kInterpreterEntryTrampoline ||
-       builtin_index == Builtins::kInterpreterEnterBytecodeAdvance ||
-       builtin_index == Builtins::kInterpreterEnterBytecodeDispatch ||
-       builtin_index == Builtins::kBaselineEnterAtBytecode ||
-       builtin_index == Builtins::kBaselineEnterAtNextBytecode)) {
+  Code interpreter_entry_trampoline =
+      isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+  Code interpreter_bytecode_advance =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
+  Code interpreter_bytecode_dispatch =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+
+  if (interpreter_entry_trampoline.contains(isolate, pc) ||
+      interpreter_bytecode_advance.contains(isolate, pc) ||
+      interpreter_bytecode_dispatch.contains(isolate, pc)) {
     return true;
   } else if (FLAG_interpreted_frames_native_stack) {
     intptr_t marker = Memory<intptr_t>(
@@ -248,7 +251,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
     } else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
       return false;
     }
-    Code interpreter_entry_trampoline =
+    interpreter_entry_trampoline =
         isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
     return interpreter_entry_trampoline.is_interpreter_trampoline_builtin();
   } else {
@@ -592,10 +595,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
     switch (code_obj.kind()) {
       case CodeKind::BUILTIN:
         if (StackFrame::IsTypeMarker(marker)) break;
-        if (code_obj.is_interpreter_trampoline_builtin() ||
-            // Frames for baseline entry trampolines on the stack are still
-            // interpreted frames.
-            code_obj.is_baseline_trampoline_builtin()) {
+        if (code_obj.is_interpreter_trampoline_builtin()) {
           return INTERPRETED;
         }
         if (code_obj.is_baseline_leave_frame_builtin()) {
@@ -1838,7 +1838,7 @@ int BaselineFrame::GetBytecodeOffset() const {
 }
 
 intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
-  return LookupCode().GetBaselineStartPCForBytecodeOffset(bytecode_offset,
-                                                          GetBytecodeArray());
+  return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset,
                                                      GetBytecodeArray());
 }
...
@@ -371,7 +371,7 @@ CodeKind Code::kind() const {
 int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                          BytecodeArray bytecodes) {
   DisallowGarbageCollection no_gc;
-  CHECK(!is_baseline_trampoline_builtin());
+  CHECK(!is_baseline_prologue_builtin());
   if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
   CHECK_EQ(kind(), CodeKind::BASELINE);
   baseline::BytecodeOffsetIterator offset_iterator(
@@ -382,33 +382,13 @@ int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
 }
 
 uintptr_t Code::GetBaselinePCForBytecodeOffset(int bytecode_offset,
-                                               BytecodeToPCPosition position,
                                                BytecodeArray bytecodes) {
   DisallowGarbageCollection no_gc;
   CHECK_EQ(kind(), CodeKind::BASELINE);
   baseline::BytecodeOffsetIterator offset_iterator(
       ByteArray::cast(bytecode_offset_table()), bytecodes);
   offset_iterator.AdvanceToBytecodeOffset(bytecode_offset);
-  uintptr_t pc = 0;
-  if (position == kPcAtStartOfBytecode) {
-    pc = offset_iterator.current_pc_start_offset();
-  } else {
-    DCHECK_EQ(position, kPcAtEndOfBytecode);
-    pc = offset_iterator.current_pc_end_offset();
-  }
-  return pc;
-}
-
-uintptr_t Code::GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
-                                                    BytecodeArray bytecodes) {
-  return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtStartOfBytecode,
-                                        bytecodes);
-}
-
-uintptr_t Code::GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
-                                                  BytecodeArray bytecodes) {
-  return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtEndOfBytecode,
-                                        bytecodes);
+  return offset_iterator.current_pc_start_offset();
 }
 
 void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
@@ -434,18 +414,14 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
           index == Builtins::kInterpreterEnterBytecodeDispatch);
 }
 
-inline bool Code::is_baseline_trampoline_builtin() const {
-  const int index = builtin_index();
-  return index != Builtins::kNoBuiltinId &&
-         (index == Builtins::kBaselineOutOfLinePrologue ||
-          index == Builtins::kBaselineEnterAtBytecode ||
-          index == Builtins::kBaselineEnterAtNextBytecode);
-}
-
 inline bool Code::is_baseline_leave_frame_builtin() const {
   return builtin_index() == Builtins::kBaselineLeaveFrame;
 }
 
+inline bool Code::is_baseline_prologue_builtin() const {
+  return builtin_index() == Builtins::kBaselineOutOfLinePrologue;
+}
+
 inline bool Code::checks_optimization_marker() const {
   bool checks_marker =
       (builtin_index() == Builtins::kCompileLazy ||
...
@@ -277,7 +277,7 @@ class Code : public HeapObject {
   inline bool is_interpreter_trampoline_builtin() const;
 
   // Testers for baseline builtins.
-  inline bool is_baseline_trampoline_builtin() const;
+  inline bool is_baseline_prologue_builtin() const;
   inline bool is_baseline_leave_frame_builtin() const;
 
   // Tells whether the code checks the optimization marker in the function's
@@ -406,12 +406,8 @@ class Code : public HeapObject {
   static inline void CopyRelocInfoToByteArray(ByteArray dest,
                                               const CodeDesc& desc);
 
-  inline uintptr_t GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
-                                                       BytecodeArray bytecodes);
-
-  inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
-                                                     BytecodeArray bytecodes);
-
+  inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
+                                                  BytecodeArray bytecodes);
   inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                             BytecodeArray bytecodes);
@@ -558,17 +554,6 @@ class Code : public HeapObject {
   bool is_promise_rejection() const;
   bool is_exception_caught() const;
 
-  enum BytecodeToPCPosition {
-    kPcAtStartOfBytecode,
-    // End of bytecode equals the start of the next bytecode.
-    // We need it when we deoptimize to the next bytecode (lazy deopt or deopt
-    // of non-topmost frame).
-    kPcAtEndOfBytecode
-  };
-
-  inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
-                                                  BytecodeToPCPosition position,
-                                                  BytecodeArray bytecodes);
-
   OBJECT_CONSTRUCTORS(Code, HeapObject);
 };
...
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
let arr = [];
for (var i = 0; i < 1000000; i++) {
arr[i] = [];
}
assertEquals(1000000, i);