Commit 7ad5b961 authored by Clemens Backes, committed by Commit Bot

[liftoff] Fix >=2GB memory accesses on 32-bit

We were inconsistent in handling offsets >= 2GB on 32-bit systems. The
code was still relying on this being detected as statically out of
bounds, but with the increase of {kV8MaxWasmMemoryPages} to support 4GB
memories, this is not the case any more.

This CL fixes this by again detecting such situations as statically OOB.
We do not expect to be able to allocate memories of size >2GB on such
systems. If this assumption turns out to be wrong, we will erroneously
trap. If that happens, we will have to explicitly disallow memories of
such size on 32-bit systems.

R=jkummerow@chromium.org

Bug: v8:7881, chromium:1201340
Change-Id: Ic89a67d38fb860eb8a48a4ff51bc02c53f8a2c2a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2848467
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74175}
parent 8e735324
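The commit message boils down to a simple range argument. The sketch below is not V8 code; the 2 GiB figure is the new 32-bit runtime cap introduced by this CL. It shows why a constant offset of 2 GiB or more can never pass the bounds check and can therefore be rejected statically:

#include <cstdint>

// Minimal sketch, assuming the 2 GiB runtime cap from this CL. An access is in
// bounds iff offset + access_size fits into the largest allocatable memory.
constexpr bool StaticallyInBounds(uint64_t offset, uint64_t access_size,
                                  uint64_t max_memory_size) {
  return access_size <= max_memory_size &&
         offset <= max_memory_size - access_size;
}

constexpr uint64_t kMax32BitMemorySize = uint64_t{1} << 31;  // 2 GiB

// A 3 GiB constant offset (as used by the regression test at the bottom of
// this page) is always out of bounds on 32-bit, so Liftoff can treat the
// access as statically OOB and emit a trap instead.
static_assert(!StaticallyInBounds(uint64_t{3} << 30, 4, kMax32BitMemorySize),
              "offsets >= 2GB are statically OOB on 32-bit");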
@@ -769,13 +769,8 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
                             uint32_t* protected_load_pc, bool is_load_mem) {
-  // If offset_imm cannot be converted to int32 safely, we abort as a separate
-  // check should cause this code to never be executed.
-  // TODO(7881): Support when >2GB is required.
-  if (!is_uint31(offset_imm)) {
-    TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
-    return;
-  }
+  // Offsets >=2GB are statically OOB on 32-bit systems.
+  DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,
                         static_cast<int32_t>(offset_imm), type, pinned,
                         protected_load_pc, is_load_mem);
@@ -785,13 +780,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              uint32_t offset_imm, LiftoffRegister src,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc, bool is_store_mem) {
-  // If offset_imm cannot be converted to int32 safely, we abort as a separate
-  // check should cause this code to never be executed.
-  // TODO(7881): Support when >2GB is required.
-  if (!is_uint31(offset_imm)) {
-    TurboAssembler::Abort(AbortReason::kOffsetOutOfRange);
-    return;
-  }
+  // Offsets >=2GB are statically OOB on 32-bit systems.
+  DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   UseScratchRegisterScope temps(this);
   if (type.value() == StoreType::kF64Store) {
     Register actual_dst_addr = liftoff::CalculateActualAddress(
...
@@ -391,13 +391,8 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
                             uint32_t* protected_load_pc, bool is_load_mem) {
-  if (offset_imm > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
-    // We do not generate code here, because such an offset should never pass
-    // the bounds check. However, the spec requires us to compile code with such
-    // an offset.
-    Trap();
-    return;
-  }
+  // Offsets >=2GB are statically OOB on 32-bit systems.
+  DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
   Operand src_op = offset_reg == no_reg
                        ? Operand(src_addr, offset_imm)
@@ -473,6 +468,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                              StoreType type, LiftoffRegList pinned,
                              uint32_t* protected_store_pc, bool is_store_mem) {
   DCHECK_EQ(type.value_type() == kWasmI64, src.is_gp_pair());
+  // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   Operand dst_op = offset_reg == no_reg
                        ? Operand(dst_addr, offset_imm)
...
@@ -2589,10 +2589,7 @@ class LiftoffCompiler {
   Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
                           uint64_t offset, LiftoffRegister index,
                           LiftoffRegList pinned, ForceCheck force_check) {
-    // If the offset does not fit in a uintptr_t, this can never succeed on this
-    // machine.
     const bool statically_oob =
-        offset > std::numeric_limits<uintptr_t>::max() ||
         !base::IsInBounds<uintptr_t>(offset, access_size,
                                      env_->max_memory_size);
...
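One way to read the dropped clause: with the cap introduced in the next hunk, max_memory_size on a 32-bit target never exceeds what a uintptr_t can hold, so (assuming the offset is compared at full width) an offset beyond that range also exceeds the cap and is rejected by the ordinary range check alone. A small illustration with restated constants, not V8 code:

#include <cstdint>
#include <limits>

constexpr uint64_t kUintptrMax32 = std::numeric_limits<uint32_t>::max();
constexpr uint64_t kMaxMemorySize32 = uint64_t{1} << 31;  // 2 GiB cap

// The cap stays within the 32-bit pointer range ...
static_assert(kMaxMemorySize32 <= kUintptrMax32, "cap fits a 32-bit uintptr_t");
// ... so any offset that does not even fit a uintptr_t already exceeds the cap
// and fails the plain range check without a dedicated overflow clause.
static_assert(kUintptrMax32 + 1 > kMaxMemorySize32, "overflow implies OOB");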
@@ -64,9 +64,11 @@ struct CompilationEnv {
   // Features enabled for this compilation.
   const WasmFeatures enabled_features;
 
-  static constexpr uint32_t kMaxMemoryPagesAtRuntime =
-      std::min(kV8MaxWasmMemoryPages,
-               std::numeric_limits<uintptr_t>::max() / kWasmPageSize);
+  // We assume that memories of size >= half of the virtual address space
+  // cannot be allocated (see https://crbug.com/1201340).
+  static constexpr uint32_t kMaxMemoryPagesAtRuntime = std::min(
+      kV8MaxWasmMemoryPages,
+      (uintptr_t{1} << (kSystemPointerSize == 4 ? 31 : 63)) / kWasmPageSize);
 
   constexpr CompilationEnv(const WasmModule* module,
                            UseTrapHandler use_trap_handler,
...
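With 64 KiB wasm pages and the 65536-page engine maximum (the 4 GiB limit mentioned in the commit message), the new expression caps a 32-bit build at 32768 pages, i.e. 2 GiB, while 64-bit builds keep the full 4 GiB. A standalone check of that arithmetic, with the constants restated here rather than taken from the V8 headers:

#include <algorithm>
#include <cstdint>

// Restated constants: 64 KiB wasm pages, 65536-page (4 GiB) engine maximum.
constexpr uint64_t kWasmPageSize = 64 * 1024;
constexpr uint64_t kV8MaxWasmMemoryPages = 65536;

constexpr uint64_t MaxPagesAtRuntime(int system_pointer_size) {
  return std::min(kV8MaxWasmMemoryPages,
                  (uint64_t{1} << (system_pointer_size == 4 ? 31 : 63)) /
                      kWasmPageSize);
}

static_assert(MaxPagesAtRuntime(4) == 32768, "32-bit: capped at 2 GiB");
static_assert(MaxPagesAtRuntime(8) == 65536, "64-bit: full 4 GiB");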
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
load('test/mjsunit/wasm/wasm-module-builder.js');
builder = new WasmModuleBuilder();
builder.addImportedMemory();
let leb = [0x80, 0x80, 0x80, 0x80, 0x0c];
builder.addFunction('store', makeSig([kWasmI32, kWasmI32], []))
.addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32StoreMem, 0, ...leb])
.exportFunc();
builder.toModule();
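The five-byte `leb` sequence is the unsigned-LEB128 encoding of the store's memarg offset. Decoding it gives 0x0C << 28 = 0xC0000000, i.e. a constant offset of 3 GiB, which exercises exactly the >=2GB path fixed above. A quick check of that decoding; the helper is written here for illustration and is not part of the test:

#include <cstdint>
#include <initializer_list>

// Unsigned LEB128: each byte contributes its low 7 bits, least significant
// group first; the high bit marks continuation.
constexpr uint64_t DecodeUnsignedLeb128(std::initializer_list<uint8_t> bytes) {
  uint64_t result = 0;
  int shift = 0;
  for (uint8_t b : bytes) {
    result |= static_cast<uint64_t>(b & 0x7F) << shift;
    shift += 7;
  }
  return result;
}

static_assert(DecodeUnsignedLeb128({0x80, 0x80, 0x80, 0x80, 0x0C}) ==
                  0xC0000000,  // 3 GiB constant offset
              "the regression test stores at an offset well above 2 GiB");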