Commit 674ec087 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Implement memory bounds checks

Add support for memory operations without trap handling, i.e. emit
memory bounds checks.
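
For intuition, the check emitted by BoundsCheckMem (see the hunk below) behaves like this standalone C++ model; it is a sketch only, and AccessInBounds is an illustrative name, not part of this CL:

#include <cstdint>

// Sketch: does an access of `access_size` bytes at constant `offset` plus
// dynamic `index` fit into a memory that is currently `mem_size` bytes?
// (Accesses that are statically out of bounds even for the largest possible
// memory jump straight to the trap and never reach these dynamic checks.)
bool AccessInBounds(uint32_t index, uint32_t offset, uint32_t access_size,
                    uint32_t mem_size) {
  // Offset of the last byte touched by the access.
  uint32_t end_offset = offset + access_size - 1;
  // Dynamic check 1: the constant part must fit. Elided at compile time
  // when end_offset < min_size, because memory never shrinks below min_size.
  if (end_offset >= mem_size) return false;
  // Dynamic check 2: the index must fit into the remaining space. Unsigned
  // comparison, so an i32 index that would be negative also traps.
  return index < mem_size - end_offset;
}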

Drive-by: Reorganize liftoff-assembler-defs.h.

R=titzer@chromium.org

Bug: v8:6600, v8:7210
Change-Id: I30d84dfcaabd4bd9d147e007e525d00fa474b155
Reviewed-on: https://chromium-review.googlesource.com/824275
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50141}
parent 68975751
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -296,9 +296,16 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
-  test(reg, reg);
-  j(zero, label);
+void LiftoffAssembler::emit_i32_test(Register reg) { test(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+  cmp(lhs, rhs);
 }
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+  j(cond, label);
+}
 
 void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -8,62 +8,57 @@
 #include "src/reglist.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/ia32/assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64.h"
+#endif
 
 namespace v8 {
 namespace internal {
 namespace wasm {
 
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
+#if V8_TARGET_ARCH_IA32
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
     Register::ListOf<eax, ecx, edx, ebx, esi, edi>();
 // Omit xmm7, which is the kScratchDoubleReg.
-static constexpr RegList kLiftoffAssemblerFpCacheRegs =
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
     DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6>();
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
-
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
-static constexpr RegList kLiftoffAssemblerGpCacheRegs =
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
     Register::ListOf<rax, rcx, rdx, rbx, rsi, rdi>();
-static constexpr RegList kLiftoffAssemblerFpCacheRegs =
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
     DoubleRegister::ListOf<xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7>();
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
-
 #else
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
-static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
-static constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
+constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
+constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
+constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
+#endif
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+constexpr Condition kEqual = equal;
+constexpr Condition kUnsignedGreaterEqual = above_equal;
+#else
+// On unimplemented platforms, just make this compile.
+constexpr Condition kEqual = static_cast<Condition>(0);
+constexpr Condition kUnsignedGreaterEqual = static_cast<Condition>(0);
+#endif
 
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
-#endif
 
 #endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
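
The Condition aliases matter for correctness: kUnsignedGreaterEqual maps to above_equal, an unsigned comparison. A small standalone illustration of why the bounds check must compare unsigned (values hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t mem_size = 65536;                   // one wasm page
  uint32_t end_offset = 3;                     // e.g. 4-byte load at offset 0
  uint32_t effective_size = mem_size - end_offset;
  uint32_t index = static_cast<uint32_t>(-1);  // i32 index -1, reinterpreted
  // above_equal: 0xffffffff >= effective_size holds, so this access traps;
  // a signed comparison would see -1 < effective_size and wrongly pass it.
  assert(index >= effective_size);
}
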
......@@ -281,7 +281,10 @@ class LiftoffAssembler : public TurboAssembler {
   inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                            DoubleRegister rhs);
 
-  inline void JumpIfZero(Register, Label*);
+  inline void emit_i32_test(Register);
+  inline void emit_i32_compare(Register, Register);
+  inline void emit_jump(Label*);
+  inline void emit_cond_jump(Condition, Label*);
 
   inline void CallTrapCallbackForTesting();
......
......@@ -78,11 +78,22 @@ class LiftoffCompiler {
       : asm_(liftoff_asm),
         call_desc_(call_desc),
         env_(env),
+        min_size_(env_->module->initial_pages * wasm::WasmModule::kPageSize),
+        max_size_((env_->module->has_maximum_pages
+                       ? env_->module->maximum_pages
+                       : wasm::kV8MaxWasmMemoryPages) *
+                  wasm::WasmModule::kPageSize),
         runtime_exception_support_(runtime_exception_support),
         source_position_table_builder_(source_position_table_builder),
         compilation_zone_(liftoff_asm->isolate()->allocator(),
                           "liftoff compilation"),
-        safepoint_table_builder_(&compilation_zone_) {}
+        safepoint_table_builder_(&compilation_zone_) {
+    // Check for overflow in max_size_.
+    DCHECK_EQ(max_size_, uint64_t{env_->module->has_maximum_pages
+                                      ? env_->module->maximum_pages
+                                      : wasm::kV8MaxWasmMemoryPages} *
+                             wasm::WasmModule::kPageSize);
+  }
 
   bool ok() const { return ok_; }
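
The DCHECK above uses a compute-twice idiom: max_size_ holds the 32-bit product, and the same product recomputed in 64 bits must agree, proving the 32-bit multiplication did not overflow. A minimal standalone version of the idiom (page limit illustrative, not taken from the CL):

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint32_t kPageSize = 64 * 1024;  // wasm page size, 64 KiB
  constexpr uint32_t kMaxPages = 32767;      // illustrative maximum page count
  uint32_t max_size = kMaxPages * kPageSize;            // 32-bit product
  assert(max_size == uint64_t{kMaxPages} * kPageSize);  // equal => no overflow
}
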
......@@ -248,7 +259,6 @@ class LiftoffCompiler {
       // Therefore we emit a call to C here instead of a call to the runtime.
       __ CallTrapCallbackForTesting();
       __ LeaveFrame(StackFrame::WASM_COMPILED);
-      __ set_has_frame(false);
       __ Ret();
       return;
     }
......@@ -558,7 +568,8 @@ class LiftoffCompiler {
     TraceCacheState(decoder);
     Label cont_false;
     Register value = __ PopToRegister(kGpReg).gp();
-    __ JumpIfZero(value, &cont_false);
+    __ emit_i32_test(value);
+    __ emit_cond_jump(kEqual, &cont_false);
     Br(target);
     __ bind(&cont_false);
......@@ -571,6 +582,43 @@ class LiftoffCompiler {
   void Else(Decoder* decoder, Control* if_block) {
     unsupported(decoder, "else");
   }
+
+  void BoundsCheckMem(uint32_t access_size, uint32_t offset, Register index,
+                      wasm::WasmCodePosition position, LiftoffRegList pinned) {
+    if (FLAG_wasm_no_bounds_checks) return;
+    // Add OOL code.
+    Label* trap_label = AddTrapCode(kTrapMemOutOfBounds, position);
+
+    if (access_size > max_size_ || offset > max_size_ - access_size) {
+      // The access will be out of bounds, even for the largest memory.
+      __ emit_jump(trap_label);
+      return;
+    }
+    uint32_t end_offset = offset + access_size - 1;
+
+    // If the end offset is larger than the smallest memory, dynamically check
+    // the end offset against the actual memory size, which is not known at
+    // compile time. Otherwise, only one check is required (see below).
+    LiftoffRegister end_offset_reg =
+        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+    LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
+    __ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
+    __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+    if (end_offset >= min_size_) {
+      __ emit_i32_compare(end_offset_reg.gp(), mem_size.gp());
+      __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+    }
+
+    // Just reuse the end_offset register for computing the effective size.
+    LiftoffRegister effective_size_reg = end_offset_reg;
+    __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
+                    end_offset_reg.gp());
+    __ emit_i32_compare(index, effective_size_reg.gp());
+    __ emit_cond_jump(kUnsignedGreaterEqual, trap_label);
+  }
+
   void LoadMem(Decoder* decoder, LoadType type,
                const MemoryAccessOperand<validate>& operand,
                const Value& index_val, Value* result) {
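
Why the first comparison can be skipped when end_offset < min_size_: the memory is never smaller than the module's initial size, so for small constant offsets the end_offset-vs-mem_size check is statically true. A worked example with hypothetical numbers:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t min_size = 1 * 65536;  // initial_pages = 1 (hypothetical)
  const uint32_t offset = 16;           // constant offset of the access
  const uint32_t access_size = 4;       // i32 load
  const uint32_t end_offset = offset + access_size - 1;  // = 19
  // end_offset < min_size <= mem_size for every reachable memory size, so
  // only the index check against (mem_size - end_offset) is emitted.
  std::printf("end_offset check elided: %s\n",
              end_offset < min_size ? "yes" : "no");
}
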
......@@ -579,7 +627,9 @@ class LiftoffCompiler {
     LiftoffRegList pinned;
     Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
     if (!env_->use_trap_handler) {
-      return unsupported(decoder, "non-traphandler");
+      // Emit an explicit bounds check.
+      BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+                     pinned);
     }
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
......@@ -602,6 +652,11 @@ class LiftoffCompiler {
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister(rc));
     Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
+    if (!env_->use_trap_handler) {
+      // Emit an explicit bounds check.
+      BoundsCheckMem(type.size(), operand.offset, index, decoder->position(),
+                     pinned);
+    }
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
     __ Store(addr, index, operand.offset, value, type, pinned);
......@@ -670,10 +725,13 @@ class LiftoffCompiler {
   LiftoffAssembler* const asm_;
   compiler::CallDescriptor* const call_desc_;
   compiler::ModuleEnv* const env_;
-  compiler::RuntimeExceptionSupport runtime_exception_support_;
+  // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
+  const uint32_t min_size_;
+  const uint32_t max_size_;
+  const compiler::RuntimeExceptionSupport runtime_exception_support_;
   bool ok_ = true;
   std::vector<TrapOolCode> trap_ool_code_;
-  SourcePositionTableBuilder* source_position_table_builder_;
+  SourcePositionTableBuilder* const source_position_table_builder_;
   // Zone used to store information during compilation. The result will be
   // stored independently, such that this zone can die together with the
   // LiftoffCompiler after compilation.
......
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -65,7 +65,13 @@ void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
 void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                     DoubleRegister rhs) {}
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
+void LiftoffAssembler::emit_i32_test(Register reg) {}
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {}
+void LiftoffAssembler::emit_jump(Label* label) {}
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {}
 void LiftoffAssembler::CallTrapCallbackForTesting() {}
......
......@@ -308,9 +308,16 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
   }
 }
 
-void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
-  testl(reg, reg);
-  j(zero, label);
+void LiftoffAssembler::emit_i32_test(Register reg) { testl(reg, reg); }
+
+void LiftoffAssembler::emit_i32_compare(Register lhs, Register rhs) {
+  cmpl(lhs, rhs);
 }
+
+void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+
+void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label) {
+  j(cond, label);
+}
 
 void LiftoffAssembler::CallTrapCallbackForTesting() {
......
......@@ -36,4 +36,9 @@
   # tested standalone.
   'fail/modules-skip*': [SKIP],
 }],  # ALWAYS
+
+['variant == liftoff', {
+  # Memory tracing is not yet implemented in liftoff (crbug.com/v8/7210).
+  'wasm-trace-memory': [SKIP],
+}],  # variant == liftoff
 ]