Commit bbeccc35 authored by Clemens Backes, committed by V8 LUCI CQ

[liftoff][x64] Add explicit stack check for large frames

Add an explicit check for the available stack space before allocating a
large frame. Even though this typically does not cause problems on x64,
we should do it to be consistent with other platforms and with TurboFan
code.

After also fixing ia32 (https://crrev.com/c/3059075), we can add a
DCHECK to verify that we never overflow the stack space by more than
4KB (https://crrev.com/c/3059076).

R=ahaas@chromium.org

Bug: v8:12017
Change-Id: I4f407dc6a83d4a71636066777706f23d05002111
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3059074
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75990}
parent 43580d82
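For readers unfamiliar with the mechanism: the check added by this commit boils down to comparing the stack pointer against the real stack limit plus the frame size before the frame is allocated. The standalone C++ sketch below illustrates that logic only; it is not V8 code, the helper name and parameters are hypothetical, and it assumes the real stack limit already includes the headroom needed to throw the stack overflow exception.

#include <cstdint>

// Hypothetical helper mirroring the "extended stack check" done by the
// patched prologue for large frames.
bool FrameFitsOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                      uint64_t frame_size, uint64_t max_stack_size) {
  // A frame larger than the whole configured stack can never fit; bailing out
  // here also keeps the addition below from overflowing.
  if (frame_size >= max_stack_size) return false;
  // Otherwise the frame fits iff sp stays at or above limit + frame_size.
  return sp >= real_stack_limit + frame_size;
}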
@@ -13,6 +13,7 @@
 #include "src/heap/memory-chunk.h"
 #include "src/wasm/baseline/liftoff-assembler.h"
 #include "src/wasm/simd-shuffle.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -206,45 +207,78 @@ void LiftoffAssembler::AlignFrameSize() {
   max_used_spill_offset_ = RoundUp(max_used_spill_offset_, kSystemPointerSize);
 }
 
-void LiftoffAssembler::PatchPrepareStackFrame(int offset,
-                                              SafepointTableBuilder*) {
+void LiftoffAssembler::PatchPrepareStackFrame(
+    int offset, SafepointTableBuilder* safepoint_table_builder) {
   // The frame_size includes the frame marker and the instance slot. Both are
   // pushed as part of frame construction, so we don't need to allocate memory
   // for them anymore.
   int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
-  // Need to align sp to system pointer size.
-  DCHECK_EQ(frame_size, RoundUp(frame_size, kSystemPointerSize));
-  // We can't run out of space, just pass anything big enough to not cause the
-  // assembler to try to grow the buffer.
+  DCHECK_EQ(0, frame_size % kSystemPointerSize);
+
+  // We can't run out of space when patching, just pass anything big enough to
+  // not cause the assembler to try to grow the buffer.
   constexpr int kAvailableSpace = 64;
   Assembler patching_assembler(
       AssemblerOptions{},
       ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
-#if V8_TARGET_OS_WIN
-  if (frame_size > kStackPageSize) {
-    // Generate OOL code (at the end of the function, where the current
-    // assembler is pointing) to do the explicit stack limit check (see
-    // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-6.0/aa227153(v=vs.60)).
-    // At the function start, emit a jump to that OOL code (from {offset} to
-    // {pc_offset()}).
-    int ool_offset = pc_offset() - offset;
-    patching_assembler.jmp_rel(ool_offset);
-    DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
-    patching_assembler.Nop(liftoff::kSubSpSize -
-                           patching_assembler.pc_offset());
-    // Now generate the OOL code.
-    AllocateStackSpace(frame_size);
-    // Jump back to the start of the function (from {pc_offset()} to {offset +
-    // kSubSpSize}).
-    int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
-    jmp_rel(func_start_offset);
+
+  if (V8_LIKELY(frame_size < 4 * KB)) {
+    // This is the standard case for small frames: just subtract from SP and be
+    // done with it.
+    patching_assembler.sub_sp_32(frame_size);
+    DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
     return;
   }
-#endif
-  patching_assembler.sub_sp_32(frame_size);
-  DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
+
+  // The frame size is bigger than 4KB, so we might overflow the available stack
+  // space if we first allocate the frame and then do the stack check (we will
+  // need some remaining stack space for throwing the exception). That's why we
+  // check the available stack space before we allocate the frame. To do this we
+  // replace the {__ sub(sp, framesize)} with a jump to OOL code that does this
+  // "extended stack check".
+  //
+  // The OOL code can simply be generated here with the normal assembler,
+  // because all other code generation, including OOL code, has already finished
+  // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+  // to the current {pc_offset()} to execute the OOL code for allocating the
+  // large frame.
+
+  // Emit the unconditional branch in the function prologue (from {offset} to
+  // {pc_offset()}).
+  patching_assembler.jmp_rel(pc_offset() - offset);
+  DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
+  patching_assembler.Nop(liftoff::kSubSpSize - patching_assembler.pc_offset());
+
+  // If the frame is bigger than the stack, we throw the stack overflow
+  // exception unconditionally. Thereby we can avoid the integer overflow
+  // check in the condition code.
+  Label continuation;
+  if (frame_size < FLAG_stack_size * 1024) {
+    movq(kScratchRegister,
+         FieldOperand(kWasmInstanceRegister,
+                      WasmInstanceObject::kRealStackLimitAddressOffset));
+    movq(kScratchRegister, Operand(kScratchRegister, 0));
+    addq(kScratchRegister, Immediate(frame_size));
+    cmpq(rsp, kScratchRegister);
+    j(above_equal, &continuation, Label::kNear);
+  }
+
+  near_call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+  // The call will not return; just define an empty safepoint.
+  safepoint_table_builder->DefineSafepoint(this);
+  AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+
+  bind(&continuation);
+
+  // Now allocate the stack space. Note that this might do more than just
+  // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}.
+  AllocateStackSpace(frame_size);
+
+  // Jump back to the start of the function, from {pc_offset()} to
+  // right after the reserved space for the {__ sub(sp, sp, framesize)} (which
+  // is a branch now).
+  int func_start_offset = offset + liftoff::kSubSpSize;
+  jmp_rel(func_start_offset - pc_offset());
 }
 
 void LiftoffAssembler::FinishCode() {}
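A side note on the patching scheme visible above: the prologue reserves a fixed-size slot of liftoff::kSubSpSize bytes, which {PatchPrepareStackFrame} later overwrites with either the real stack decrement or a branch to the out-of-line check, padding any leftover bytes with nops. The sketch below illustrates that patch-slot idea in isolation; it is not V8 code, and the names and the slot size are hypothetical.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical slot size, e.g. the encoding length of "sub rsp, imm32" on x64.
constexpr size_t kSlotSize = 7;

// Overwrite the reserved prologue slot with a shorter (or equal-sized)
// instruction sequence and pad the rest with single-byte nops (0x90 on x64),
// so the code following the slot keeps its offsets.
void PatchPrologueSlot(std::vector<uint8_t>& code, size_t offset,
                       const std::vector<uint8_t>& instruction) {
  assert(instruction.size() <= kSlotSize);
  assert(offset + kSlotSize <= code.size());
  std::memcpy(code.data() + offset, instruction.data(), instruction.size());
  std::memset(code.data() + offset + instruction.size(), 0x90,
              kSlotSize - instruction.size());
}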