Commit 47acbf34 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Stay within reserved stack space for register moves

When executing register moves, we might need to spill registers to the
stack. Ensure that we don't exceed the reserved stack space for the
current frame.

R=ahaas@chromium.org

Bug: v8:7366, v8:6600
Change-Id: Ic11ff2ff5f46535c3663ef4cf62b095f6c8ba637
Reviewed-on: https://chromium-review.googlesource.com/883282
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50847}
parent c53f9f97
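
For context on why register moves need stack slots at all: a move cycle such as {r0 -> r1, r1 -> r0} cannot be executed with plain moves, so one source is spilled to a fresh slot first. A minimal stand-alone illustration in plain C++ (the maps are stand-ins for registers and the spill area, not V8 data structures):

// Illustration only, not V8 code: break the move cycle r0 <-> r1 by
// spilling one source, and record the spill high-water mark the same way
// the new max_used_spill_slot_ bookkeeping in this CL does.
#include <cstdint>
#include <iostream>
#include <map>

int main() {
  std::map<int, int> regs = {{0, 111}, {1, 222}};  // r0, r1
  std::map<uint32_t, int> spill_area;              // stack spill slots
  uint32_t next_spill_slot = 0;
  uint32_t max_used_spill_slot = 0;

  // Spill r0, overwrite r0 from r1, then reload r1 from the spill slot.
  spill_area[next_spill_slot] = regs[0];
  regs[0] = regs[1];
  regs[1] = spill_area[next_spill_slot];
  if (next_spill_slot > max_used_spill_slot) {
    max_used_spill_slot = next_spill_slot;
  }
  ++next_spill_slot;

  std::cout << "r0=" << regs[0] << " r1=" << regs[1]
            << " max_used_spill_slot=" << max_used_spill_slot << "\n";
  return 0;
}

Every slot consumed this way must lie inside the stack space reserved in the frame prologue; the diffs below thread the high-water mark out to the compiler so it can bail out otherwise.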
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -604,8 +604,9 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
-  PrepareCall(sig, call_desc, &target);
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
+  PrepareCall(sig, call_desc, max_used_spill_slot, &target);
   if (target == no_reg) {
     add(esp, Immediate(kPointerSize));
     call(Operand(esp, -4));
...
@@ -99,6 +99,9 @@ class StackTransferRecipe {
       LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
       DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
       src_reg_use_count[spill_reg.liftoff_code()] = 0;
+      if (next_spill_slot > max_used_spill_slot_) {
+        max_used_spill_slot_ = next_spill_slot;
+      }
       ++next_spill_slot;
       executed_moves = 1;
     }
@@ -182,6 +185,8 @@ class StackTransferRecipe {
     register_loads_.emplace_back(dst, stack_index, type);
   }
 
+  uint32_t max_used_spill_slot() const { return max_used_spill_slot_; }
+
  private:
   // TODO(clemensh): Avoid unconditionally allocating on the heap.
   std::vector<RegisterMove> register_moves_;
@@ -189,6 +194,7 @@ class StackTransferRecipe {
   LiftoffRegList move_dst_regs_;
   LiftoffRegList move_src_regs_;
   LiftoffAssembler* const asm_;
+  uint32_t max_used_spill_slot_ = 0;
 };
 
 static constexpr ValueType kWasmIntPtr =
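
The two hunks above add the high-water-mark state to StackTransferRecipe. Reduced to its essence, the new bookkeeping behaves like this hedged sketch (an illustrative class, not the actual recipe, which also manages the pending register moves and loads):

// Sketch only: track the highest spill slot consumed while breaking move
// cycles, and expose it the way the new max_used_spill_slot() accessor does.
#include <algorithm>
#include <cstdint>

class SpillSlotTracker {
 public:
  // Called each time breaking a move cycle consumes {spill_slot}.
  void OnSpill(uint32_t spill_slot) {
    max_used_spill_slot_ = std::max(max_used_spill_slot_, spill_slot);
  }
  // Queried by the caller after all transfers have executed.
  uint32_t max_used_spill_slot() const { return max_used_spill_slot_; }

 private:
  uint32_t max_used_spill_slot_ = 0;
};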
@@ -391,6 +397,7 @@ void LiftoffAssembler::SpillAllRegisters() {
 void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_desc,
+                                   uint32_t* max_used_spill_slot,
                                    Register* target) {
   uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
   // Parameter 0 is the wasm context.
@@ -462,6 +469,10 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
   // Execute the stack transfers before filling the context register.
   stack_transfers.Execute();
 
+  // Record the maximum used stack slot index, such that we can bail out if the
+  // stack grew too large.
+  *max_used_spill_slot = stack_transfers.max_used_spill_slot();
+
   // Reset register use counters.
   cache_state_.reset_used_registers();
...
@@ -271,8 +271,10 @@ class LiftoffAssembler : public TurboAssembler {
   // Load parameters into the right registers / stack slots for the call.
   // Move {*target} into another register if needed and update {*target} to that
   // register, or {no_reg} if target was spilled to the stack.
+  // TODO(clemensh): Remove {max_used_spill_slot} once we support arbitrary
+  // stack sizes.
   void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
-                   Register* target = nullptr);
+                   uint32_t* max_used_spill_slot, Register* target = nullptr);
   // Process return values of the call.
   void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
@@ -365,9 +367,11 @@ class LiftoffAssembler : public TurboAssembler {
   inline void CallNativeWasmCode(Address addr);
   inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
+  // TODO(clemensh): Remove {max_used_spill_slot} once we support arbitrary
+  // stack sizes.
   inline void CallIndirect(wasm::FunctionSig* sig,
-                           compiler::CallDescriptor* call_desc,
-                           Register target);
+                           compiler::CallDescriptor* call_desc, Register target,
+                           uint32_t* max_used_spill_slot);
 
   // Reserve space in the current frame, store address to space in {addr}.
   inline void AllocateStackSlot(Register addr, uint32_t size);
...
@@ -963,7 +963,12 @@ class LiftoffCompiler {
     compiler::CallDescriptor* call_desc =
         compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
 
-    __ PrepareCall(operand.sig, call_desc);
+    uint32_t max_used_spill_slot = 0;
+    __ PrepareCall(operand.sig, call_desc, &max_used_spill_slot);
+    if (max_used_spill_slot >
+        __ num_locals() + LiftoffAssembler::kMaxValueStackHeight) {
+      unsupported(decoder, "value stack grows too large in call");
+    }
 
     source_position_table_builder_->AddPosition(
         __ pc_offset(), SourcePosition(decoder->position()), false);
@@ -1076,7 +1081,12 @@ class LiftoffCompiler {
     compiler::CallDescriptor* call_desc =
         compiler::GetWasmCallDescriptor(compilation_zone_, operand.sig);
 
-    __ CallIndirect(operand.sig, call_desc, scratch.gp());
+    uint32_t max_used_spill_slot = 0;
+    __ CallIndirect(operand.sig, call_desc, scratch.gp(), &max_used_spill_slot);
+    if (max_used_spill_slot >
+        __ num_locals() + LiftoffAssembler::kMaxValueStackHeight) {
+      unsupported(decoder, "value stack grows too large in indirect call");
+    }
 
     safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
                                              Safepoint::kNoLazyDeopt);
...
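
Both call sites compare the reported high-water mark against num_locals() + kMaxValueStackHeight, i.e. the slot budget covered by the space reserved in the frame prologue, and bail out of Liftoff when it is exceeded. A hedged numeric sketch of that check (the concrete values are invented; only the shape of the comparison mirrors the patch):

// Illustration only: anything above the reserved slot budget would write
// outside the frame, so the function is rejected for Liftoff compilation.
#include <cstdint>
#include <iostream>

int main() {
  uint32_t num_locals = 3;             // assumed locals of the function
  uint32_t kMaxValueStackHeight = 8;   // reserved value-stack budget
  uint32_t max_used_spill_slot = 12;   // as reported back by PrepareCall

  if (max_used_spill_slot > num_locals + kMaxValueStackHeight) {
    std::cout << "unsupported: value stack grows too large in call\n";
  }
  return 0;
}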
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -186,7 +186,8 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
   UNIMPLEMENTED();
 }
...
@@ -598,8 +598,9 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                     compiler::CallDescriptor* call_desc,
-                                    Register target) {
-  PrepareCall(sig, call_desc, &target);
+                                    Register target,
+                                    uint32_t* max_used_spill_slot) {
+  PrepareCall(sig, call_desc, max_used_spill_slot, &target);
   if (target == no_reg) {
     popq(kScratchRegister);
     target = kScratchRegister;
...