Commit e68728a2 authored by Deepti Gandluri, committed by Commit Bot

[wasm] Add templatized methods for static bounds checks

The IsInBounds function is used in a few different places. When it is
used for bounds checks on 32-bit platforms, taking size_t for
max_memory_size leads to incorrect out-of-bounds accesses, because
size_t is not guaranteed to be 64 bits wide on all platforms. Use
explicit uint32_t/uint64_t instantiations for Wasm bounds checks
instead of size_t.

Bug: chromium:1080902
Change-Id: I0e21f0a310382c8ed0703c8302200d3352495c13
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2256858
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68500}
parent c4e66e3b
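To make the failure mode described above concrete, here is a minimal, self-contained sketch (not V8 code): it uses a local copy of the templated helper from this CL, and the offset and memory-size values are made up. Instantiating the check with uint32_t simulates what the old size_t-based call did on a 32-bit target, where the 64-bit offset is silently truncated before the comparison.

```cpp
#include <cstdint>
#include <iostream>
#include <type_traits>

// Local copy of the templated helper introduced by this CL (base::IsInBounds),
// repeated here so the sketch compiles on its own.
template <typename T,
          typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
constexpr bool IsInBounds(T index, T length, T max) {
  return length <= max && index <= (max - length);
}

int main() {
  // Hypothetical values: a 64-bit offset far past a 64 KiB memory.
  uint64_t offset = (uint64_t{1} << 32) + 8;  // out of bounds for any wasm32 memory
  uint64_t access_size = 4;
  uint64_t max_memory_size = 0x10000;  // 64 KiB, one wasm page

  // Explicit 64-bit instantiation: correctly reports out of bounds.
  std::cout << IsInBounds<uint64_t>(offset, access_size, max_memory_size)
            << "\n";  // 0

  // Simulating the old size_t-based call on a 32-bit target, where size_t is
  // 32 bits: the offset is truncated to 8 and the check wrongly passes.
  std::cout << IsInBounds<uint32_t>(static_cast<uint32_t>(offset),
                                    static_cast<uint32_t>(access_size),
                                    static_cast<uint32_t>(max_memory_size))
            << "\n";  // 1 (incorrectly "in bounds")
}
```

With the explicit <uint64_t> instantiations below, the full-width offset reaches the comparison, so the statically out-of-bounds case is detected (and, in the compilers, trapped).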
@@ -27,7 +27,9 @@ inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
 // Checks if [index, index+length) is in range [0, max). Note that this check
 // works even if {index+length} would wrap around.
-inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) {
+template <typename T,
+          typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
+inline constexpr bool IsInBounds(T index, T length, T max) {
   return length <= max && index <= (max - length);
 }
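As a side note on the hunk above, here is a short usage sketch (again with a standalone copy of the helper, not part of the patch): the std::enable_if/std::is_unsigned guard means only unsigned instantiations compile, and the `index <= max - length` form stays correct even where a naive `index + length <= max` would wrap around.

```cpp
#include <cstdint>
#include <limits>
#include <type_traits>

// Standalone copy of the helper, for illustration only.
template <typename T,
          typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
constexpr bool IsInBounds(T index, T length, T max) {
  return length <= max && index <= (max - length);
}

// [10, 15) fits into [0, 16); [12, 17) does not.
static_assert(IsInBounds<uint32_t>(10u, 5u, 16u), "in bounds");
static_assert(!IsInBounds<uint32_t>(12u, 5u, 16u), "out of bounds");

// A naive `index + length <= max` would wrap around to 1 and wrongly pass
// here; the `index <= max - length` form reports out of bounds.
static_assert(!IsInBounds<uint32_t>(std::numeric_limits<uint32_t>::max(), 2u, 16u),
              "wrap-around is out of bounds");

// IsInBounds<int>(0, 1, 2);  // does not compile: the enable_if guard
//                            // only accepts unsigned types

int main() {}
```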
@@ -3493,7 +3493,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
     return index;
   }
-  if (!base::IsInBounds(offset, access_size, env_->max_memory_size)) {
+  if (!base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size)) {
     // The access will be out of bounds, even for the largest memory.
     TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
     return mcgraph()->IntPtrConstant(0);
@@ -1934,7 +1934,7 @@ class LiftoffCompiler {
                       uint32_t offset, Register index, LiftoffRegList pinned,
                       ForceCheck force_check) {
     const bool statically_oob =
-        !base::IsInBounds(offset, access_size, env_->max_memory_size);
+        !base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size);
     if (!force_check && !statically_oob &&
         (!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
@@ -526,8 +526,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
       auto table_object = handle(WasmTableObject::cast(instance->tables().get(
                                      elem_segment.table_index)),
                                  isolate_);
-      size_t table_size = table_object->current_length();
-      if (!base::IsInBounds(base, elem_segment.entries.size(), table_size)) {
+      uint32_t table_size = table_object->current_length();
+      if (!base::IsInBounds<uint32_t>(
+              base, static_cast<uint32_t>(elem_segment.entries.size()),
+              table_size)) {
        thrower_->LinkError("table initializer is out of bounds");
        return {};
      }
@@ -539,8 +541,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   for (const WasmDataSegment& seg : module_->data_segments) {
     if (!seg.active) continue;
     uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
-    if (!base::IsInBounds(base, seg.source.length(),
-                          instance->memory_size())) {
+    if (!base::IsInBounds<uint64_t>(base, seg.source.length(),
+                                    instance->memory_size())) {
       thrower_->LinkError("data segment is out of bounds");
       return {};
     }
@@ -732,7 +734,8 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
     if (size == 0) continue;
     uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
-    DCHECK(base::IsInBounds(dest_offset, size, instance->memory_size()));
+    DCHECK(base::IsInBounds<uint64_t>(dest_offset, size,
+                                      instance->memory_size()));
     byte* dest = instance->memory_start() + dest_offset;
     const byte* src = wire_bytes.begin() + segment.source.offset();
     memcpy(dest, src, size);
@@ -1668,11 +1671,12 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
   // TODO(wasm): Move this functionality into wasm-objects, since it is used
   // for both instantiation and in the implementation of the table.init
   // instruction.
-  if (!base::IsInBounds(dst, count, table_object->current_length()) ||
-      !base::IsInBounds(src, count,
-                        instance->dropped_elem_segments()[segment_index] == 0
-                            ? elem_segment.entries.size()
-                            : 0)) {
+  if (!base::IsInBounds<uint64_t>(dst, count, table_object->current_length()) ||
+      !base::IsInBounds<uint64_t>(
+          src, count,
+          instance->dropped_elem_segments()[segment_index] == 0
+              ? elem_segment.entries.size()
+              : 0)) {
     return false;
   }
@@ -500,13 +500,13 @@ int32_t memory_init_wrapper(Address data) {
   uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
   uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
   uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
-  size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+  uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
-  size_t mem_size = instance.memory_size();
-  if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+  uint64_t mem_size = instance.memory_size();
+  if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
-  size_t seg_size = instance.data_segment_sizes()[seg_index];
-  if (!base::IsInBounds(src, size, seg_size)) return kOutOfBounds;
+  uint32_t seg_size = instance.data_segment_sizes()[seg_index];
+  if (!base::IsInBounds<uint32_t>(src, size, seg_size)) return kOutOfBounds;
   byte* seg_start =
       reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
@@ -525,11 +525,11 @@ int32_t memory_copy_wrapper(Address data) {
   WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
   uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
   uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
-  size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+  uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
-  size_t mem_size = instance.memory_size();
-  if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
-  if (!base::IsInBounds(src, size, mem_size)) return kOutOfBounds;
+  uint64_t mem_size = instance.memory_size();
+  if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
+  if (!base::IsInBounds<uint64_t>(src, size, mem_size)) return kOutOfBounds;
   // Use std::memmove, because the ranges can overlap.
   std::memmove(EffectiveAddress(instance, dst), EffectiveAddress(instance, src),
@@ -550,10 +550,10 @@ int32_t memory_fill_wrapper(Address data) {
   uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
   uint8_t value =
       static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
-  size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+  uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
-  size_t mem_size = instance.memory_size();
-  if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+  uint64_t mem_size = instance.memory_size();
+  if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
   std::memset(EffectiveAddress(instance, dst), value, size);
   return kSuccess;
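A brief note on the width choices in these three wrappers (my reading of the change, not wording from the CL): memory_size() of a wasm32 instance can be as large as 4 GiB, which does not fit in uint32_t, so the memory bounds checks instantiate the helper with uint64_t, while data-segment sizes always fit in 32 bits and use uint32_t. A small sketch with a standalone copy of the helper and made-up values:

```cpp
#include <cstdint>
#include <iostream>
#include <type_traits>

// Standalone copy of the helper, for illustration only.
template <typename T,
          typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
constexpr bool IsInBounds(T index, T length, T max) {
  return length <= max && index <= (max - length);
}

int main() {
  // Hypothetical: a memory grown to the wasm32 maximum of 4 GiB.
  uint64_t mem_size = uint64_t{1} << 32;  // 4 GiB; does not fit in uint32_t
  uint32_t dst = 0xFFFFFFF0u;             // the last 16 bytes of that memory
  uint32_t size = 16;

  // 64-bit check: touching the final 16 bytes is (correctly) in bounds.
  std::cout << IsInBounds<uint64_t>(uint64_t{dst}, uint64_t{size}, mem_size)
            << "\n";  // 1

  // If mem_size were squeezed into 32 bits it would wrap to 0, and the same
  // access would be reported out of bounds.
  std::cout << IsInBounds<uint32_t>(dst, size, static_cast<uint32_t>(mem_size))
            << "\n";  // 0
}
```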
@@ -1515,8 +1515,8 @@ class WasmInterpreterInternals {
     if (effective_index < index) {
       return kNullAddress;  // wraparound => oob
     }
-    if (!base::IsInBounds(effective_index, sizeof(mtype),
-                          instance_object_->memory_size())) {
+    if (!base::IsInBounds<uint64_t>(effective_index, sizeof(mtype),
+                                    instance_object_->memory_size())) {
       return kNullAddress;  // oob
     }
     return EffectiveAddress(effective_index);
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-threads
load("test/mjsunit/wasm/wasm-module-builder.js");
let memory = new WebAssembly.Memory({
initial: 1,
maximum: 10,
shared: true
});
let builder = new WasmModuleBuilder();
builder.addImportedMemory("m", "imported_mem", 0, 1 << 16, "shared");
builder.addFunction("main", kSig_i_v).addBody([
  kExprI32Const, 0,
  kAtomicPrefix,
  kExprI32AtomicLoad16U, 1, 0]).exportAs("main");
let module = new WebAssembly.Module(builder.toBuffer());
let instance = new WebAssembly.Instance(module, {
m: {
imported_mem: memory
}
});
instance.exports.main();
@@ -135,8 +135,8 @@ TYPED_TEST(UtilsTest, PassesFilterTest) {
 TEST(UtilsTest, IsInBounds) {
   // for column consistency and terseness
-#define INB(x, y, z) EXPECT_TRUE(base::IsInBounds(x, y, z))
-#define OOB(x, y, z) EXPECT_FALSE(base::IsInBounds(x, y, z))
+#define INB(x, y, z) EXPECT_TRUE(base::IsInBounds<size_t>(x, y, z))
+#define OOB(x, y, z) EXPECT_FALSE(base::IsInBounds<size_t>(x, y, z))
   INB(0, 0, 1);
   INB(0, 1, 1);
   INB(1, 0, 1);