Commit 352e408b authored by Ben L. Titzer, committed by Commit Bot

[wasm] Support 4GiB memories in Liftoff

Add codegen support for up to 4GiB memories in Liftoff code.

This CL also adds three new mjsunit tests that stress large WASM
memories (1, 2, and 4 GiB) and check that accesses near these
boundaries properly generate traps.

Note there is still some trickiness around the setting of:
  1.) the flag --wasm-max-mem-pages
  2.) wasm-limits.h kSpecMaxWasmMemoryPages = 65536
  3.) wasm-limits.h kV8MaxWasmMemoryPages = 32767

In particular, the allocation of memories is still limited to
3.) and the runtime flag can only lower this limit.

The above means that the tests for 2GiB and 4GiB memories will silently
OOM by design until 3.) is changed (though they currently pass with
manual testing). I argue it is better to include these tests up front,
since they will immediately trigger if their memory allocation succeeds.

Therefore the plan is to lift the restriction on 3.) after removing
all other internal V8 limitations, including array buffers and views.
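
For reference, those page counts translate to bytes as follows (a
standalone sketch; the constant name mirrors wasm-limits.h and the
static_asserts are purely illustrative):

  #include <cstdint>

  constexpr uint64_t kWasmPageSize = 64 * 1024;  // 64KiB wasm pages
  // 2.) 65536 pages are exactly 4GiB; 3.) 32767 pages are 2GiB - 64KiB.
  static_assert(65536 * kWasmPageSize == uint64_t{4} * 1024 * 1024 * 1024,
                "spec max is 4GiB");
  static_assert(32767 * kWasmPageSize ==
                    uint64_t{2} * 1024 * 1024 * 1024 - kWasmPageSize,
                "current V8 max is one page below 2GiB");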

R=clemensh@chromium.org
CC=mstarzinger@chromium.org
BUG=v8:7881

Change-Id: I3205ac2daf5c9a84364c670a2c3ef2258e5649f6
Reviewed-on: https://chromium-review.googlesource.com/1151309
Commit-Queue: Ben Titzer <titzer@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54754}
parent af010cfb
@@ -3038,19 +3038,14 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
     return index;
   }
-  uint64_t min_size =
-      env_->module->initial_pages * uint64_t{wasm::kWasmPageSize};
-  uint64_t max_size =
-      (env_->module->has_maximum_pages ? env_->module->maximum_pages
-                                       : wasm::kV8MaxWasmMemoryPages) *
-      uint64_t{wasm::kWasmPageSize};
-  if (access_size > max_size || offset > max_size - access_size) {
+  const bool statically_oob = access_size > env_->max_memory_size ||
+                              offset > env_->max_memory_size - access_size;
+  if (statically_oob) {
     // The access will be out of bounds, even for the largest memory.
     TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
     return mcgraph()->IntPtrConstant(0);
   }
-  uint64_t end_offset = offset + access_size - 1;
+  uint64_t end_offset = uint64_t{offset} + access_size - 1u;
   Node* end_offset_node = IntPtrConstant(end_offset);
   // The accessed memory is [index + offset, index + end_offset].
@@ -3064,7 +3059,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   auto m = mcgraph()->machine();
   Node* mem_size = instance_cache_->mem_size;
-  if (end_offset >= min_size) {
+  if (end_offset >= env_->min_memory_size) {
     // The end offset is larger than the smallest memory.
     // Dynamically check the end offset against the dynamic memory size.
     Node* cond = graph()->NewNode(m->UintLessThan(), end_offset_node, mem_size);
@@ -3075,7 +3070,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   UintPtrMatcher match(index);
   if (match.HasValue()) {
     uintptr_t index_val = match.Value();
-    if (index_val < min_size - end_offset) {
+    if (index_val < env_->min_memory_size - end_offset) {
       // The input index is a constant and everything is statically within
       // bounds of the smallest possible memory.
       return index;
...
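
The rewritten check does all arithmetic in 64 bits, so a 4GiB memory can
no longer overflow the end-offset computation. A minimal standalone
sketch of the same logic (illustrative helper, not the V8 API):

  #include <cstdint>

  // An access of {access_size} bytes at {index + offset} is in bounds iff
  // index + offset + access_size - 1 < mem_size. Folding the constant part
  // into end_offset leaves a single unsigned comparison at runtime.
  bool AccessInBounds(uint64_t index, uint32_t offset, uint8_t access_size,
                      uint64_t mem_size) {
    uint64_t end_offset = uint64_t{offset} + access_size - 1;
    if (end_offset >= mem_size) return false;  // OOB even for index 0
    return index < mem_size - end_offset;      // the dynamic check
  }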
@@ -249,7 +249,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on arm.
 }
 
 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
...
@@ -227,9 +227,10 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
   // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
   // immediate value (in 31 bits, interpreted as signed value).
   // If the offset is bigger, we always trap and this code is not reached.
-  DCHECK(is_uint31(offset_imm));
+  // Note: We shouldn't have memories larger than 2GiB on 32-bit, but if we
+  // did, we encode {offset_imm} as signed, and it will simply wrap around.
   Operand src_op = offset_reg == no_reg
-                       ? Operand(src_addr, offset_imm)
+                       ? Operand(src_addr, bit_cast<int32_t>(offset_imm))
                        : Operand(src_addr, offset_reg, times_1, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -278,10 +279,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
       break;
     case LoadType::kI64Load: {
       // Compute the operand for the load of the upper half.
-      DCHECK(is_uint31(offset_imm + 4));
       Operand upper_src_op =
           offset_reg == no_reg
-              ? Operand(src_addr, offset_imm + 4)
+              ? Operand(src_addr, bit_cast<int32_t>(offset_imm + 4))
               : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
       // The high word has to be mov'ed first, such that this is the protected
       // instruction. The mov of the low word cannot segfault.
@@ -308,9 +308,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
   // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
   // immediate value (in 31 bits, interpreted as signed value).
   // If the offset is bigger, we always trap and this code is not reached.
-  DCHECK(is_uint31(offset_imm));
   Operand dst_op = offset_reg == no_reg
-                       ? Operand(dst_addr, offset_imm)
+                       ? Operand(dst_addr, bit_cast<int32_t>(offset_imm))
                        : Operand(dst_addr, offset_reg, times_1, offset_imm);
   if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -342,10 +341,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
       break;
     case StoreType::kI64Store: {
       // Compute the operand for the store of the upper half.
-      DCHECK(is_uint31(offset_imm + 4));
       Operand upper_dst_op =
           offset_reg == no_reg
-              ? Operand(dst_addr, offset_imm + 4)
+              ? Operand(dst_addr, bit_cast<int32_t>(offset_imm + 4))
              : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
       // The high word has to be mov'ed first, such that this is the protected
       // instruction. The mov of the low word cannot segfault.
@@ -893,7 +891,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on ia32.
 }
 
 void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
...
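
The bit_cast<int32_t> used throughout the ia32 changes leans on x86
displacements being added modulo 2^32: an unsigned offset with the top
bit set can be re-encoded as a negative signed immediate without
changing the effective address. A hypothetical standalone illustration:

  #include <cstdint>
  #include <cstring>

  // Re-encode a uint32_t displacement as the int32_t imm32 the instruction
  // encoder expects; base + disp wraps modulo 2^32 on ia32, so the effective
  // address is unchanged. E.g. 0x80000004 becomes -2147483644.
  int32_t AsSignedDisp(uint32_t offset_imm) {
    int32_t disp;
    std::memcpy(&disp, &offset_imm, sizeof disp);  // portable bit_cast
    return disp;
  }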
@@ -448,6 +448,14 @@ class LiftoffAssembler : public TurboAssembler {
       emit_i32_add(dst, lhs, rhs);
     }
   }
+  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
+    if (kPointerSize == 8) {
+      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
+                   LiftoffRegister(rhs));
+    } else {
+      emit_i32_sub(dst, lhs, rhs);
+    }
+  }
 
   // f32 binops.
   inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
...
@@ -138,11 +138,6 @@ class LiftoffCompiler {
       : descriptor_(
             GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
         env_(env),
-        min_size_(uint64_t{env_->module->initial_pages} * wasm::kWasmPageSize),
-        max_size_(uint64_t{env_->module->has_maximum_pages
-                               ? env_->module->maximum_pages
-                               : wasm::kV8MaxWasmMemoryPages} *
-                  wasm::kWasmPageSize),
         compilation_zone_(compilation_zone),
         safepoint_table_builder_(compilation_zone_) {}
@@ -1321,15 +1316,15 @@ class LiftoffCompiler {
   // (a jump to the trap was generated then); return false otherwise.
   bool BoundsCheckMem(Decoder* decoder, uint32_t access_size, uint32_t offset,
                       Register index, LiftoffRegList pinned) {
-    const bool statically_oob =
-        access_size > max_size_ || offset > max_size_ - access_size;
+    const bool statically_oob = access_size > env_->max_memory_size ||
+                                offset > env_->max_memory_size - access_size;
     if (!statically_oob &&
         (FLAG_wasm_no_bounds_checks || env_->use_trap_handler)) {
       return false;
     }
-    // TODO(eholk): This adds protected instruction information for the jump
+    // TODO(wasm): This adds protected instruction information for the jump
     // instruction we are about to generate. It would be better to just not add
     // protected instruction info when the pc is 0.
     Label* trap_label = AddOutOfLineTrap(
@@ -1348,7 +1343,7 @@ class LiftoffCompiler {
     DCHECK(!env_->use_trap_handler);
     DCHECK(!FLAG_wasm_no_bounds_checks);
-    uint32_t end_offset = offset + access_size - 1;
+    uint64_t end_offset = uint64_t{offset} + access_size - 1u;
     // If the end offset is larger than the smallest memory, dynamically check
     // the end offset against the actual memory size, which is not known at
@@ -1356,19 +1351,30 @@ class LiftoffCompiler {
     LiftoffRegister end_offset_reg =
         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
     LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
-    LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
-    __ LoadConstant(end_offset_reg, WasmValue(end_offset));
-    if (end_offset >= min_size_) {
-      __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
-                        end_offset_reg.gp(), mem_size.gp());
+    LOAD_INSTANCE_FIELD(mem_size, MemorySize, kPointerLoadType);
+    if (kPointerSize == 8) {
+      __ LoadConstant(end_offset_reg, WasmValue(end_offset));
+    } else {
+      __ LoadConstant(end_offset_reg,
+                      WasmValue(static_cast<uint32_t>(end_offset)));
+    }
+    if (end_offset >= env_->min_memory_size) {
+      __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+                        LiftoffAssembler::kWasmIntPtr, end_offset_reg.gp(),
+                        mem_size.gp());
     }
     // Just reuse the end_offset register for computing the effective size.
     LiftoffRegister effective_size_reg = end_offset_reg;
-    __ emit_i32_sub(effective_size_reg.gp(), mem_size.gp(),
-                    end_offset_reg.gp());
-    __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index,
+    __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
+                        end_offset_reg.gp());
+    __ emit_i32_to_intptr(index, index);
+    __ emit_cond_jump(kUnsignedGreaterEqual, trap_label,
+                      LiftoffAssembler::kWasmIntPtr, index,
                       effective_size_reg.gp());
     return false;
   }
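
Taken together, the sequence Liftoff now emits computes the following at
runtime (a sketch with illustrative names, not the Liftoff API; on
32-bit targets end_offset is truncated to uint32_t as shown above, which
is safe because such memories stay below 2GiB):

  #include <cstdint>

  bool PassesBoundsCheck(uint32_t index32, uint64_t end_offset,
                         uintptr_t mem_size) {
    // First cond_jump; only emitted when end_offset >= min_memory_size,
    // i.e. when it is not statically known to pass.
    if (end_offset >= mem_size) return false;
    uintptr_t effective_size = mem_size - end_offset;  // emit_ptrsize_sub
    uintptr_t index = uintptr_t{index32};              // emit_i32_to_intptr
    return index < effective_size;                     // second cond_jump
  }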
@@ -1797,9 +1803,6 @@ class LiftoffCompiler {
   LiftoffAssembler asm_;
   compiler::CallDescriptor* const descriptor_;
   ModuleEnv* const env_;
-  // {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
-  const uint64_t min_size_;
-  const uint64_t max_size_;
   bool ok_ = true;
   std::vector<OutOfLineCode> out_of_line_code_;
   SourcePositionTableBuilder source_position_table_builder_;
...
@@ -745,7 +745,7 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+  // This is a nop on mips32.
 }
 
 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
...
@@ -258,7 +258,11 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
 }
 
 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
-  UNREACHABLE();
+#ifdef V8_TARGET_ARCH_PPC64
+  BAILOUT("emit_i32_to_intptr");
+#else
+  // This is a nop on ppc32.
+#endif
 }
 
 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
...
@@ -50,12 +50,17 @@ inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
 inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
                         uint32_t offset_imm, LiftoffRegList pinned) {
-  // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
-  // immediate value (in 31 bits, interpreted as signed value).
-  // If the offset is bigger, we always trap and this code is not reached.
-  DCHECK(is_uint31(offset_imm));
-  if (offset == no_reg) return Operand(addr, offset_imm);
-  return Operand(addr, offset, times_1, offset_imm);
+  if (is_uint31(offset_imm)) {
+    if (offset == no_reg) return Operand(addr, offset_imm);
+    return Operand(addr, offset, times_1, offset_imm);
+  }
+  // Offset immediate does not fit in 31 bits.
+  Register scratch = kScratchRegister;
+  assm->movl(scratch, Immediate(offset_imm));
+  if (offset != no_reg) {
+    assm->addq(scratch, offset);
+  }
+  return Operand(addr, scratch, times_1, 0);
 }
 
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
...
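
For offsets that do not fit in 31 bits, the new x64 path materializes
the immediate in kScratchRegister first; the effective address it
produces is sketched below (plain C++, not assembler):

  #include <cstdint>

  // addr + offset (if any) + offset_imm, computed as the fallback path does:
  // movl zero-extends the 32-bit immediate, addq folds in the index register,
  // and the final Operand uses the scratch register with displacement 0.
  uint64_t EffectiveAddress(uint64_t addr, uint64_t index_reg,
                            uint32_t offset_imm) {
    uint64_t scratch = uint64_t{offset_imm};  // movl(scratch, Immediate(...))
    scratch += index_reg;                     // addq(scratch, offset)
    return addr + scratch;                    // Operand(addr, scratch, times_1, 0)
  }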
@@ -6,6 +6,8 @@
 #define V8_WASM_FUNCTION_COMPILER_H_
 
 #include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
+#include "src/wasm/wasm-module.h"
 
 namespace v8 {
 namespace internal {
@@ -47,6 +49,14 @@ struct ModuleEnv {
   // be generated differently.
   const RuntimeExceptionSupport runtime_exception_support;
 
+  // The smallest size of any memory that could be used with this module, in
+  // bytes.
+  const uint64_t min_memory_size;
+
+  // The largest size of any memory that could be used with this module, in
+  // bytes.
+  const uint64_t max_memory_size;
+
   const LowerSimd lower_simd;
 
   constexpr ModuleEnv(const WasmModule* module, UseTrapHandler use_trap_handler,
@@ -55,6 +65,12 @@ struct ModuleEnv {
       : module(module),
         use_trap_handler(use_trap_handler),
         runtime_exception_support(runtime_exception_support),
+        min_memory_size(
+            module ? module->initial_pages * uint64_t{wasm::kWasmPageSize} : 0),
+        max_memory_size(
+            module && module->has_maximum_pages
+                ? (module->maximum_pages * uint64_t{wasm::kWasmPageSize})
+                : wasm::kSpecMaxWasmMemoryBytes),
         lower_simd(lower_simd) {}
 };
...
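
As a worked example of the new fields: a hypothetical module declaring
16384 initial pages and no maximum gets a 1GiB lower bound and the 4GiB
spec maximum as upper bound:

  #include <cstdint>

  // Hypothetical module: initial_pages = 16384, has_maximum_pages = false.
  constexpr uint64_t kWasmPageSize = 64 * 1024;
  constexpr uint64_t min_memory_size = 16384 * kWasmPageSize;  // initial_pages
  constexpr uint64_t max_memory_size = 65536 * kWasmPageSize;  // spec maximum
  static_assert(min_memory_size == uint64_t{1} << 30, "1 GiB");
  static_assert(max_memory_size == uint64_t{1} << 32, "4 GiB");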
@@ -15,6 +15,8 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+constexpr size_t kSpecMaxWasmMemoryPages = 65536;
+
 // The following limits are imposed by V8 on WebAssembly modules.
 // The limits are agreed upon with other engines for consistency.
 constexpr size_t kV8MaxWasmTypes = 1000000;
@@ -40,7 +42,6 @@ constexpr size_t kV8MaxWasmTableEntries = 10000000;
 constexpr size_t kV8MaxWasmTables = 1;
 constexpr size_t kV8MaxWasmMemories = 1;
-constexpr size_t kSpecMaxWasmMemoryPages = 65536;
 static_assert(kV8MaxWasmMemoryPages <= kSpecMaxWasmMemoryPages,
               "v8 should not be more permissive than the spec");
 constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
@@ -48,6 +49,9 @@ constexpr size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
 constexpr uint64_t kV8MaxWasmMemoryBytes =
     kV8MaxWasmMemoryPages * uint64_t{kWasmPageSize};
 
+constexpr uint64_t kSpecMaxWasmMemoryBytes =
+    kSpecMaxWasmMemoryPages * uint64_t{kWasmPageSize};
+
 constexpr uint64_t kWasmMaxHeapOffset =
     static_cast<uint64_t>(
         std::numeric_limits<uint32_t>::max())  // maximum base value
...
@@ -21,13 +21,14 @@ builder.addFunction('load', kSig_i_ii)
 const module = builder.instantiate();
 let start = 12;
 let address = start;
-for (i = 1; i < 64; i++) {
+for (i = 0; i < 64; i++) {
   // This is the address which will be accessed in the code. We cannot use
   // shifts to calculate the address because JS shifts work on 32-bit integers.
-  address = (address * 2) % 4294967296;
+  print(`address=${address}`);
   if (address < kPageSize) {
     assertEquals(0, module.exports.load(start, i));
   } else {
     assertTraps(kTrapMemOutOfBounds, _ => { module.exports.load(start, i);});
   }
+  address = (address * 2) % 4294967296;
 }
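
The loop change above matters because the probed address is
12 * 2^i mod 2^32, which wraps back to 0 from i = 30 onward and becomes
in-bounds again; a quick C++ sketch of the sequence (illustrative, not
part of the test):

  #include <cstdint>
  #include <cstdio>

  // The address sequence probed by the test: address_i = 12 * 2^i mod 2^32.
  int main() {
    uint64_t address = 12;
    for (int i = 0; i < 64; i++) {
      std::printf("i=%2d address=%llu\n", i,
                  static_cast<unsigned long long>(address));
      address = (address * 2) % 4294967296ull;
    }
    return 0;
  }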
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --wasm-max-mem-pages=16384
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
const k1GiB = 1 * 1024 * 1024 * 1024;
const k2GiB = 2 * k1GiB;
const k3GiB = 3 * k1GiB;
const k4GiB = 4 * k1GiB;
const kMaxMemory = k1GiB;
// Indexes (and offsets) used to systematically probe the memory.
const indexes = (() => {
const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
return [
0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
];
})();
(function Test() {
var memory;
function BuildAccessors(type, load_opcode, store_opcode, offset) {
builder = new WasmModuleBuilder();
builder.addImportedMemory("i", "mem");
const h = 0x80;
const m = 0x7f;
let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
h|((offset >>> 7) & m),
h|((offset >>> 14) & m),
h|((offset >>> 21) & m),
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
kExprGetLocal, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
let i = builder.instantiate({i: {mem: memory}});
return {offset: offset, load: i.exports.load, store: i.exports.store};
}
function probe(a, size, offset, f) {
print(`size=${size} offset=${offset}`);
for (let i of indexes) {
let oob = (i + size + offset) > kMaxMemory;
if (oob) {
// print(` ${i} + ${offset} OOB`);
assertThrows(() => a.store(i, f(i)));
assertThrows(() => a.load(i));
} else {
// print(` ${i} = ${f(i)}`);
a.store(i, f(i));
assertEquals(f(i), a.load(i));
}
}
}
try {
const kPages = kMaxMemory / kPageSize;
memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
} catch (e) {
print("OOM: sorry, best effort max memory size test.");
return;
}
assertEquals(kMaxMemory, memory.buffer.byteLength);
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
}
})();
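
The offset_bytes computation above writes the memarg offset as a fixed
5-byte unsigned LEB128, so any 32-bit offset (up to 4GiB-1) can be
expressed. The same encoding in C++ for comparison (illustrative, not
V8 code):

  #include <cstdint>
  #include <vector>

  // Fixed-width 5-byte unsigned LEB128, mirroring offset_bytes in the test:
  // each byte carries 7 payload bits; all but the last set the high
  // (continuation) bit.
  std::vector<uint8_t> EncodeOffsetLeb(uint32_t offset) {
    return {
        static_cast<uint8_t>(0x80 | ((offset >> 0) & 0x7f)),
        static_cast<uint8_t>(0x80 | ((offset >> 7) & 0x7f)),
        static_cast<uint8_t>(0x80 | ((offset >> 14) & 0x7f)),
        static_cast<uint8_t>(0x80 | ((offset >> 21) & 0x7f)),
        static_cast<uint8_t>((offset >> 28) & 0x7f),  // last byte: no continuation
    };
  }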
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --wasm-max-mem-pages=32768
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
const k1GiB = 1 * 1024 * 1024 * 1024;
const k2GiB = 2 * k1GiB;
const k3GiB = 3 * k1GiB;
const k4GiB = 4 * k1GiB;
const kMaxMemory = k2GiB;
// Indexes (and offsets) used to systematically probe the memory.
const indexes = (() => {
const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
return [
0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
];
})();
(function Test() {
var memory;
function BuildAccessors(type, load_opcode, store_opcode, offset) {
builder = new WasmModuleBuilder();
builder.addImportedMemory("i", "mem");
const h = 0x80;
const m = 0x7f;
let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
h|((offset >>> 7) & m),
h|((offset >>> 14) & m),
h|((offset >>> 21) & m),
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
kExprGetLocal, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
let i = builder.instantiate({i: {mem: memory}});
return {offset: offset, load: i.exports.load, store: i.exports.store};
}
function probe(a, size, offset, f) {
print(`size=${size} offset=${offset}`);
for (let i of indexes) {
let oob = (i + size + offset) > kMaxMemory;
if (oob) {
// print(` ${i} + ${offset} OOB`);
assertThrows(() => a.store(i, f(i)));
assertThrows(() => a.load(i));
} else {
// print(` ${i} = ${f(i)}`);
a.store(i, f(i));
assertEquals(f(i), a.load(i));
}
}
}
try {
let kPages = kMaxMemory / kPageSize;
memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
} catch (e) {
print("OOM: sorry, best effort max memory size test.");
return;
}
assertEquals(kMaxMemory, memory.buffer.byteLength);
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
}
})();
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const k1MiB = 1 * 1024 * 1024;
const k1GiB = 1 * 1024 * 1024 * 1024;
const k2GiB = 2 * k1GiB;
const k3GiB = 3 * k1GiB;
const k4GiB = 4 * k1GiB;
const kMaxMemory = k4GiB;
// Indexes (and offsets) used to systematically probe the memory.
const indexes = (() => {
const a = k1GiB, b = k2GiB, c = k3GiB, d = k4GiB;
return [
0, 1, 2, 3, 4, 5, 7, 8, 9, // near 0
a-8, a-4, a+0, a+1, a+2, a+3, a+4, a+5, a+7, a+8, a+9, // near 1GiB
b-8, b-4, b+0, b+1, b+2, b+3, b+4, b+5, b+7, b+8, b+9, // near 2GiB
c-8, c-4, c+0, c+1, c+2, c+3, c+4, c+5, c+7, c+8, c+9, // near 3GiB
d-9, d-8, d-7, d-5, d-4, d-3, d-2, d-1 // near 4GiB
];
})();
(function Test() {
var memory;
function BuildAccessors(type, load_opcode, store_opcode, offset) {
builder = new WasmModuleBuilder();
builder.addImportedMemory("i", "mem");
const h = 0x80;
const m = 0x7f;
let offset_bytes = [h|((offset >>> 0) & m), // LEB encoding of offset
h|((offset >>> 7) & m),
h|((offset >>> 14) & m),
h|((offset >>> 21) & m),
0|((offset >>> 28) & m)];
builder.addFunction("load", makeSig([kWasmI32], [type]))
.addBody([ // --
kExprGetLocal, 0, // --
load_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
builder.addFunction("store", makeSig([kWasmI32, type], []))
.addBody([ // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
store_opcode, 0, ...offset_bytes, // --
]) // --
.exportFunc();
let i = builder.instantiate({i: {mem: memory}});
return {offset: offset, load: i.exports.load, store: i.exports.store};
}
function probe(a, size, offset, f) {
print(`size=${size} offset=${offset}`);
for (let i of indexes) {
let oob = (i + size + offset) > kMaxMemory;
if (oob) {
// print(` ${i} + ${offset} OOB`);
assertThrows(() => a.store(i, f(i)));
assertThrows(() => a.load(i));
} else {
// print(` ${i} = ${f(i)}`);
a.store(i, f(i));
assertEquals(f(i), a.load(i));
}
}
}
try {
let kPages = kMaxMemory / kPageSize;
memory = new WebAssembly.Memory({initial: kPages, maximum: kPages});
} catch (e) {
print("OOM: sorry, best effort max memory size test.");
return;
}
assertEquals(kMaxMemory, memory.buffer.byteLength);
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem, kExprI32StoreMem, offset);
probe(a, 4, offset, i => (0xaabbccee ^ ((i >> 11) * 0x110005)) | 0);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmI32, kExprI32LoadMem8U, kExprI32StoreMem8, offset);
probe(a, 1, offset, i => (0xee ^ ((i >> 11) * 0x05)) & 0xFF);
}
for (let offset of indexes) {
let a = BuildAccessors(kWasmF64, kExprF64LoadMem, kExprF64StoreMem, offset);
probe(a, 8, offset, i => 0xaabbccee ^ ((i >> 11) * 0x110005));
}
})();