Commit be3c0126 authored by Leszek Swirski, committed by Commit Bot

[sparkplug] Include calls in stack guard

Calculate the maximum call size (the largest number of arguments any
call pushes) in the bytecode pre-visit, and pass that, together with
the bytecode's frame size, to the prologue to be included in the stack
check. This avoids doing a stack check before each call, and mirrors a
similar optimisation in TurboFan.
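
A minimal sketch of the pre-visit idea (hypothetical names, not the
actual BaselineCompiler API): walk the bytecode once before emitting
code, remember the largest argument count any call or construct pushes,
and fold that into the single frame-size value the prologue checks.

  // Sketch only; the real code lives in BaselineCompiler::UpdateMaxCallArgs
  // and PreVisitSingleBytecode (see the diff below).
  #include <algorithm>

  struct MaxCallArgsTracker {
    int max_call_args = 0;

    // Called for every call/construct bytecode found during the pre-visit.
    void Update(int pushed_args) {
      max_call_args = std::max(max_call_args, pushed_args);
    }

    // The prologue adds this to the bytecode's fixed frame size and does a
    // single stack check covering the frame plus the worst-case call.
    int WorstCase() const { return max_call_args; }
  };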

Also, use StackGuardWithGap instead of StackGuard, to make sure that
stack overflows in the prologue actually trigger stack overflows in the
runtime.
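
A sketch of the folded check in plain integers rather than the
builtin's assembly (the helper name below is made up): comparing sp
minus the whole frame size against the interrupt limit pre-checks both
limits at once, and the slow path passes that size as the gap so that
genuinely running out of stack is reported as a stack overflow rather
than as a pending interrupt.

  #include <cstdint>

  // Sketch only; PrologueStackCheck is a hypothetical stand-in for the
  // check emitted by Builtins::Generate_BaselineOutOfLinePrologue.
  enum class StackCheckResult { kFrameFits, kCallStackGuardWithGap };

  StackCheckResult PrologueStackCheck(uintptr_t sp,
                                      uintptr_t interrupt_limit,
                                      uintptr_t max_frame_size) {
    // The interrupt limit sits at or above the real limit, so one compare
    // covers both checks before the frame is built.
    if (sp - max_frame_size < interrupt_limit) {
      // Slow path: the frame size is SmiTagged, pushed, and handed to
      // Runtime::kStackGuardWithGap, which re-checks it against the real
      // limit and throws a stack overflow if the frame would not fit.
      return StackCheckResult::kCallStackGuardWithGap;
    }
    return StackCheckResult::kFrameFits;
  }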

Bug: v8:11420
Fixed: chromium:1189890
Change-Id: I795c197c20f85611318ab09c7bca78ce40b64924
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2778278
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73600}
parent 07db5a65
......@@ -15,14 +15,13 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ masm()->mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister,
kJavaScriptCallNewTargetRegister);
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
......
......@@ -14,14 +14,13 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ masm()->Mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister,
kJavaScriptCallNewTargetRegister);
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
__ masm()->AssertSpAligned();
PrologueFillFrame();
......
......@@ -391,8 +391,36 @@ void BaselineCompiler::AddPosition() {
}
void BaselineCompiler::PreVisitSingleBytecode() {
if (iterator().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
EnsureLabels(iterator().GetJumpTargetOffset());
switch (iterator().current_bytecode()) {
case interpreter::Bytecode::kJumpLoop:
EnsureLabels(iterator().GetJumpTargetOffset());
break;
// TODO(leszeks): Update the max_call_args as part of the main bytecode
// visit loop, by patching the value passed to the prologue.
case interpreter::Bytecode::kCallProperty:
case interpreter::Bytecode::kCallAnyReceiver:
case interpreter::Bytecode::kCallWithSpread:
case interpreter::Bytecode::kCallNoFeedback:
case interpreter::Bytecode::kConstruct:
case interpreter::Bytecode::kConstructWithSpread:
return UpdateMaxCallArgs(
iterator().GetRegisterListOperand(1).register_count());
case interpreter::Bytecode::kCallUndefinedReceiver:
return UpdateMaxCallArgs(
iterator().GetRegisterListOperand(1).register_count() + 1);
case interpreter::Bytecode::kCallProperty0:
case interpreter::Bytecode::kCallUndefinedReceiver0:
return UpdateMaxCallArgs(1);
case interpreter::Bytecode::kCallProperty1:
case interpreter::Bytecode::kCallUndefinedReceiver1:
return UpdateMaxCallArgs(2);
case interpreter::Bytecode::kCallProperty2:
case interpreter::Bytecode::kCallUndefinedReceiver2:
return UpdateMaxCallArgs(3);
default:
break;
}
}
......
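
A note on the argument counts in the hunk above: the register-list
operand of the kCallUndefinedReceiver forms does not include the
receiver, so one extra slot is counted for the implicit undefined
receiver (hence register_count + 1, and 1/2/3 for the fixed-arity
forms), while the kCallProperty operands already include the explicit
receiver. Illustrative helper below is hypothetical, not V8 code.

  // Hypothetical helper, for illustration only: slots a call pushes is the
  // register-list count plus one if the receiver is implicit.
  int PushedArgs(int register_count, bool implicit_receiver) {
    return register_count + (implicit_receiver ? 1 : 0);
  }
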
......@@ -111,6 +111,10 @@ class BaselineCompiler {
// Misc. helpers.
void UpdateMaxCallArgs(int max_call_args) {
max_call_args_ = std::max(max_call_args_, max_call_args);
}
// Select the root boolean constant based on the jump in the given
// `jump_func` -- the function should jump to the given label if we want to
// select "true", otherwise it should fall through.
......@@ -170,6 +174,8 @@ class BaselineCompiler {
BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
Zone zone_;
int max_call_args_ = 0;
struct ThreadedLabel {
Label label;
ThreadedLabel* ptr;
......
......@@ -16,11 +16,11 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ Move(ecx, bytecode_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister, ecx,
kJavaScriptCallNewTargetRegister);
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
......
......@@ -16,12 +16,11 @@ namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ Move(kInterpreterBytecodeArrayRegister, bytecode_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister,
kJavaScriptCallNewTargetRegister);
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
......
......@@ -1136,6 +1136,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
......@@ -1144,10 +1146,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// building the frame we can quickly precheck both at once.
UseScratchRegisterScope temps(masm);
Register frame_size = temps.Acquire();
__ ldr(frame_size,
FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
Register sp_minus_frame_size = frame_size;
Register sp_minus_frame_size = temps.Acquire();
__ sub(sp_minus_frame_size, sp, frame_size);
Register interrupt_limit = temps.Acquire();
__ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
......@@ -1180,7 +1179,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt call");
// Save incoming new target or generator
__ Push(kJavaScriptCallNewTargetRegister);
__ CallRuntime(Runtime::kStackGuard);
__ SmiTag(frame_size);
__ Push(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap);
__ Pop(kJavaScriptCallNewTargetRegister);
__ RecordComment("]");
}
......
......@@ -1304,6 +1304,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
......@@ -1312,11 +1314,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// building the frame we can quickly precheck both at once.
UseScratchRegisterScope temps(masm);
Register frame_size = temps.AcquireW();
__ Ldr(frame_size,
FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
Register sp_minus_frame_size = frame_size.X();
__ Sub(sp_minus_frame_size, sp, frame_size.X());
Register sp_minus_frame_size = temps.AcquireX();
__ Sub(sp_minus_frame_size, sp, frame_size);
Register interrupt_limit = temps.AcquireX();
__ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
__ Cmp(sp_minus_frame_size, interrupt_limit);
......@@ -1348,7 +1347,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt call");
// Save incoming new target or generator
__ Push(padreg, new_target);
__ CallRuntime(Runtime::kStackGuard);
__ SmiTag(frame_size);
__ PushArgument(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap);
__ Pop(new_target, padreg);
__ RecordComment("]");
}
......
......@@ -1644,16 +1644,19 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Builtins::kBaselineOutOfLinePrologue);
Register arg_count = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
// Save argument count and bytecode array.
XMMRegister saved_arg_count = xmm0;
XMMRegister saved_bytecode_array = xmm1;
XMMRegister saved_frame_size = xmm2;
XMMRegister saved_feedback_vector = xmm3;
__ movd(saved_arg_count, arg_count);
__ movd(saved_bytecode_array, bytecode_array);
__ movd(saved_frame_size, frame_size);
Register scratch = eax;
// Use the arg count (eax) as the scratch register.
Register scratch = arg_count;
// Load the feedback vector from the closure.
Register feedback_vector = ecx;
......@@ -1671,7 +1674,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// register.
Label has_optimized_code_or_marker;
Register optimization_state = ecx;
XMMRegister saved_feedback_vector = xmm2;
LoadOptimizationStateAndJumpIfNeedsProcessing(masm, optimization_state,
saved_feedback_vector,
&has_optimized_code_or_marker);
......@@ -1685,6 +1687,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
__ PopReturnAddressTo(return_address, scratch);
// The bytecode array was pushed to the stack by the caller.
__ Pop(saved_bytecode_array, scratch);
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::BASELINE);
......@@ -1700,6 +1704,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register.
Register bytecode_array = scratch;
__ movd(bytecode_array, saved_bytecode_array);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
......@@ -1727,10 +1732,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
//
// TODO(v8:11429): Backport this folded check to the
// InterpreterEntryTrampoline.
Register frame_size = ecx;
__ movd(bytecode_array, saved_bytecode_array);
__ movzx_w(frame_size,
FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
__ movd(frame_size, saved_frame_size);
__ Move(scratch, esp);
DCHECK_NE(frame_size, kJavaScriptCallNewTargetRegister);
__ sub(scratch, frame_size);
......@@ -1747,11 +1749,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&has_optimized_code_or_marker);
{
__ RecordComment("[ Optimized marker check");
// Drop the return address, rebalancing the return stack buffer by using
// JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
// return since we may do a runtime call along the way that requires the
// stack to only contain valid frames.
__ Drop(1);
// Drop the return address and bytecode array, rebalancing the return stack
// buffer by using JumpMode::kPushAndReturn. We can't leave the slot and
// overwrite it on return since we may do a runtime call along the way that
// requires the stack to only contain valid frames.
__ Drop(2);
__ movd(arg_count, saved_arg_count); // Restore actual argument count.
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
saved_feedback_vector);
......@@ -1769,7 +1771,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(kJavaScriptCallNewTargetRegister);
__ CallRuntime(Runtime::kStackGuard, 0);
__ SmiTag(frame_size);
__ Push(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
__ Pop(kJavaScriptCallNewTargetRegister);
}
......
......@@ -1622,7 +1622,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = rbx;
Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
......@@ -1690,6 +1690,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
......@@ -1699,9 +1701,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
//
// TODO(v8:11429): Backport this folded check to the
// InterpreterEntryTrampoline.
Register frame_size = r11;
__ movzxwl(frame_size,
FieldOperand(bytecode_array, BytecodeArray::kFrameSizeOffset));
__ Move(kScratchRegister, rsp);
DCHECK_NE(frame_size, new_target);
__ subq(kScratchRegister, frame_size);
......@@ -1740,7 +1739,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
__ Push(new_target);
__ CallRuntime(Runtime::kStackGuard, 0);
__ SmiTag(frame_size);
__ Push(frame_size);
__ CallRuntime(Runtime::kStackGuardWithGap, 1);
__ Pop(new_target);
}
......
......@@ -333,19 +333,17 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {kContextRegister,
kJSFunctionRegister,
kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register,
kJavaScriptCallNewTargetRegister,
kInterpreterBytecodeArrayRegister};
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_IA32
// TODO(v8:11503): Use register names that can be defined in each
// architecture indenpendently of the interpreter registers.
Register registers[] = {kContextRegister, kJSFunctionRegister,
kJavaScriptCallArgCountRegister, ecx,
kJavaScriptCallNewTargetRegister};
data->InitializePlatformSpecific(kParameterCount, registers);
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
Register registers[] = {
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
data->InitializePlatformSpecific(kParameterCount, registers);
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
V8_TARGET_ARCH_ARM
data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
registers);
#else
InitializePlatformUnimplemented(data, kParameterCount);
#endif
......
......@@ -1466,16 +1466,26 @@ class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
kJavaScriptCallArgCount,
kInterpreterBytecodeArray,
kJavaScriptCallNewTarget)
kJavaScriptCallArgCount, kStackFrameSize,
kJavaScriptCallNewTarget,
kInterpreterBytecodeArray)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kCalleeContext
MachineType::AnyTagged(), // kClosure
MachineType::Int32(), // kJavaScriptCallArgCount
MachineType::AnyTagged(), // kInterpreterBytecodeArray
MachineType::AnyTagged()) // kJavaScriptCallNewTarget
MachineType::Int32(), // kStackFrameSize
MachineType::AnyTagged(), // kJavaScriptCallNewTarget
MachineType::AnyTagged()) // kInterpreterBytecodeArray
DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
CallInterfaceDescriptor)
#if V8_TARGET_ARCH_IA32
static const bool kPassLastArgsOnStack = true;
#else
static const bool kPassLastArgsOnStack = false;
#endif
// Pass bytecode array through the stack.
static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
};
class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
......
......@@ -1438,8 +1438,8 @@
}], # no_simd_sse == True
##############################################################################
# TODO(v8:11421): Port baseline compiler to ia32, Arm, MIPS, S390 and PPC
['arch not in (x64, arm64)', {
# TODO(v8:11421): Port baseline compiler to other architectures.
['arch not in (x64, arm64, ia32, arm)', {
'baseline/*': [SKIP],
}],
......