Commit 7071504d authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Handle statically-known memory OOB better

If memory is statically known to be out of bounds, do not generate code
for the load or store, and also mark the rest of the current block
unreachable to avoid unnecessary code generation.
This also prevents us from having to special-case illegal memory
offsets in the LiftoffAssembler. For valid code, the offset will always
be smaller than 2GB.

R=ahaas@chromium.org

Bug: v8:6600
Change-Id: Ib5a9006780098e9f2ab9eda4bac7939f15612ae0
Reviewed-on: https://chromium-review.googlesource.com/939821
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51623}
parent 567dcad1
......@@ -114,23 +114,13 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
DCHECK_EQ(type.value_type() == kWasmI64, dst.is_pair());
Register src = no_reg;
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
DCHECK(is_uint31(offset_imm));
Operand src_op = offset_reg == no_reg
? Operand(src_addr, offset_imm)
: Operand(src_addr, offset_reg, times_1, offset_imm);
// max_offset can overflow, but then is_uint31(offset_imm) is false and
// max_offset will not be used.
uint32_t max_offset = offset_imm + 4 * (type.value() == LoadType::kI64Load);
if (is_uint31(offset_imm) && is_uint31(max_offset)) {
// The immediate(s) can not be encoded in the operand. Load the offset to a
// register first.
src = GetUnusedRegister(kGpReg, pinned).gp();
mov(src, Immediate(offset_imm));
if (offset_reg != no_reg) {
emit_ptrsize_add(src, src, offset_reg);
}
src_op = Operand(src_addr, src, times_1, 0);
}
if (protected_load_pc) *protected_load_pc = pc_offset();
switch (type.value()) {
......@@ -178,13 +168,11 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
break;
case LoadType::kI64Load: {
// Compute the operand for the load of the upper half.
DCHECK(is_uint31(offset_imm + 4));
Operand upper_src_op =
offset_reg == no_reg
? Operand(src_addr, offset_imm + 4)
: Operand(src_addr, offset_reg, times_1, offset_imm + 4);
if (src != no_reg) {
upper_src_op = Operand(src_addr, src, times_1, 4);
}
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
mov(dst.high_gp(), upper_src_op);
......@@ -207,23 +195,13 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
DCHECK_EQ(type.value_type() == kWasmI64, src.is_pair());
Register dst = no_reg;
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
DCHECK(is_uint31(offset_imm));
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
// max_offset can overflow, but then is_uint31(offset_imm) is false and
// max_offset will not be used.
uint32_t max_offset = offset_imm + 4 * (type.value() == StoreType::kI64Store);
if (is_uint31(offset_imm) && is_uint31(max_offset)) {
// The immediate(s) can not be encoded in the operand. Load the offset to a
// register first.
dst = pinned.set(GetUnusedRegister(kGpReg, pinned).gp());
mov(dst, Immediate(offset_imm));
if (offset_reg != no_reg) {
emit_ptrsize_add(dst, dst, offset_reg);
}
dst_op = Operand(dst_addr, dst, times_1, 0);
}
if (protected_store_pc) *protected_store_pc = pc_offset();
switch (type.value()) {
......@@ -254,13 +232,11 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
break;
case StoreType::kI64Store: {
// Compute the operand for the store of the upper half.
DCHECK(is_uint31(offset_imm + 4));
Operand upper_dst_op =
offset_reg == no_reg
? Operand(dst_addr, offset_imm + 4)
: Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
if (dst != no_reg) {
upper_dst_op = Operand(dst_addr, dst, times_1, 4);
}
// The high word has to be mov'ed first, such that this is the protected
// instruction. The mov of the low word cannot segfault.
mov(upper_dst_op, src.high_gp());
......
......@@ -949,19 +949,33 @@ class LiftoffCompiler {
return out_of_line_code_.back().label.get();
}
void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
wasm::WasmCodePosition position, LiftoffRegList pinned) {
DCHECK(!env_->use_trap_handler);
if (FLAG_wasm_no_bounds_checks) return;
// Returns true if the memory access is statically known to be out of bounds
// (a jump to the trap was generated then); return false otherwise.
bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
Register index, LiftoffRegList pinned) {
const bool statically_oob =
access_size > max_size_ || offset > max_size_ - access_size;
if (!statically_oob &&
(FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
return false;
}
Label* trap_label =
AddOutOfLineTrap(position, Builtins::kThrowWasmTrapMemOutOfBounds);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds);
if (access_size > max_size_ || offset > max_size_ - access_size) {
// The access will be out of bounds, even for the largest memory.
if (statically_oob) {
__ emit_jump(trap_label);
return;
Control* current_block = decoder->control_at(0);
if (current_block->reachable()) {
current_block->reachability = kSpecOnlyReachable;
}
return true;
}
DCHECK(!env_->use_trap_handler);
DCHECK(!FLAG_wasm_no_bounds_checks);
uint32_t end_offset = offset + access_size - 1;
// If the end offset is larger than the smallest memory, dynamically check
......@@ -984,6 +998,7 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
effective_size_reg.gp());
return false;
}
void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
......@@ -1057,10 +1072,8 @@ class LiftoffCompiler {
if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
LiftoffRegList pinned;
Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
if (!env_->use_trap_handler) {
// Emit an explicit bounds check.
BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
pinned);
if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
......@@ -1091,10 +1104,8 @@ class LiftoffCompiler {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(rc));
Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
if (!env_->use_trap_handler) {
// Emit an explicit bounds check.
BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
pinned);
if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
return;
}
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
......
......@@ -41,19 +41,14 @@ static constexpr Register kCCallLastArgAddrReg = rax;
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uint32_t offset_imm, LiftoffRegList pinned) {
if (offset_imm > kMaxInt) {
// The immediate can not be encoded in the operand. Load it to a register
// first.
Register total_offset = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->movl(total_offset, Immediate(offset_imm));
if (offset != no_reg) {
assm->emit_ptrsize_add(total_offset, addr, offset);
}
return Operand(addr, total_offset, times_1, 0);
}
// Wasm memory is limited to a size <2GB, so all offsets can be encoded as
// immediate value (in 31 bits, interpreted as signed value).
// If the offset is bigger, we always trap and this code is not reached.
DCHECK(is_uint31(offset_imm));
if (offset == no_reg) return Operand(addr, offset_imm);
return Operand(addr, offset, times_1, offset_imm);
}
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.