Commit 5f4f57eb authored by Ben Smith, committed by Commit Bot

[wasm] Fix out-of-bound behavior for bulk ops

The bulk memory operations should not bounds check ahead of time, but
instead should write as many bytes as possible until the first
out-of-bounds access.

Bug: v8:8890
Change-Id: Ia8179fe268fc65816c34a8f3461ed0a0d35600aa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1497520
Commit-Queue: Ben Smith <binji@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60040}
parent d077f9b5
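To illustrate the behavior change described in the commit message, the following is a hedged, standalone sketch (not V8 code; all names are illustrative) of the intended semantics for a bulk operation such as memory.fill: the destination range is clamped to the memory bounds, the clamped prefix is written first, and only then is an out-of-bounds trap reported if the original range did not fit.

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Reference model only: a false return stands in for kTrapMemOutOfBounds.
bool MemoryFillModel(std::vector<uint8_t>& mem, uint32_t dst, uint8_t value,
                     uint32_t size) {
  uint64_t mem_size = mem.size();
  if (dst > mem_size) return false;  // Fully out-of-bounds: trap, write nothing.
  uint64_t avail = mem_size - dst;
  uint64_t clamped = std::min<uint64_t>(size, avail);
  std::memset(mem.data() + dst, value, clamped);  // Partial write happens first...
  return clamped == size;            // ...then trap if the range was clamped.
}

int main() {
  std::vector<uint8_t> mem(10);
  bool ok = MemoryFillModel(mem, 8, 0x61, 5);  // Writes mem[8] and mem[9], then reports a trap.
  return ok ? 1 : 0;                           // ok == false here: the range was clamped.
}

This mirrors the TestMemoryFillOutOfBoundsData / TestMemoryInitOutOfBoundsData cases added to the mjsunit tests further below.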
@@ -3405,37 +3405,32 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
   return index;
 }
 
-// Check that the range [start, start + size) is in the range [0, max).
-void WasmGraphBuilder::BoundsCheckRange(Node* start, Node* size, Node* max,
-                                        wasm::WasmCodePosition position) {
-  // The accessed memory is [start, end), where {end} is {start + size}. We
-  // want to check that {start + size <= max}, making sure that {start + size}
-  // doesn't overflow. This can be expressed as {start <= max - size} as long
-  // as {max - size} isn't negative, which is true if {size <= max}.
+Node* WasmGraphBuilder::BoundsCheckRange(Node* start, Node** size, Node* max,
+                                         wasm::WasmCodePosition position) {
   auto m = mcgraph()->machine();
-  Node* cond = graph()->NewNode(m->Uint32LessThanOrEqual(), size, max);
-  TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-
-  // This produces a positive number, since {size <= max}.
-  Node* effective_size = graph()->NewNode(m->Int32Sub(), max, size);
-
-  // Introduce the actual bounds check.
-  Node* check =
-      graph()->NewNode(m->Uint32LessThanOrEqual(), start, effective_size);
-  TrapIfFalse(wasm::kTrapMemOutOfBounds, check, position);
-
-  // TODO(binji): Does this need addtional untrusted_code_mitigations_ mask
-  // like BoundsCheckMem above?
+  // The region we are trying to access is [start, start+size). If
+  // {start} > {max}, none of this region is valid, so we trap. Otherwise,
+  // there may be a subset of the region that is valid. {max - start} is the
+  // maximum valid size, so if {max - start < size}, then the region is
+  // partially out-of-bounds.
+  TrapIfTrue(wasm::kTrapMemOutOfBounds,
+             graph()->NewNode(m->Uint32LessThan(), max, start), position);
+  Node* sub = graph()->NewNode(m->Int32Sub(), max, start);
+  Node* fail = graph()->NewNode(m->Uint32LessThan(), sub, *size);
+  Diamond d(graph(), mcgraph()->common(), fail, BranchHint::kFalse);
+  d.Chain(Control());
+  *size = d.Phi(MachineRepresentation::kWord32, sub, *size);
+  return fail;
 }
 
-Node* WasmGraphBuilder::BoundsCheckMemRange(Node* start, Node* size,
+Node* WasmGraphBuilder::BoundsCheckMemRange(Node** start, Node** size,
                                             wasm::WasmCodePosition position) {
-  // TODO(binji): Support trap handler.
-  if (!FLAG_wasm_no_bounds_checks) {
-    BoundsCheckRange(start, size, instance_cache_->mem_size, position);
-  }
-  return graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(0),
-                          Uint32ToUintptr(start));
+  // TODO(binji): Support trap handler and no bounds check mode.
+  Node* fail =
+      BoundsCheckRange(*start, size, instance_cache_->mem_size, position);
+  *start = graph()->NewNode(mcgraph()->machine()->IntAdd(), MemBuffer(0),
+                            Uint32ToUintptr(*start));
+  return fail;
 }
 
 const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
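In scalar terms, the Diamond/Phi construction in the new BoundsCheckRange computes a clamped size plus a "partially out-of-bounds" bit. A rough C++ model of that dataflow, assuming uint32 operands (names here are illustrative, not V8 API):

#include <cstdint>
#include <stdexcept>

// Scalar sketch of what the new BoundsCheckRange graph computes.
bool BoundsCheckRangeModel(uint32_t start, uint32_t* size, uint32_t max) {
  // TrapIfTrue(kTrapMemOutOfBounds, Uint32LessThan(max, start)): the whole
  // region starts past {max}, so nothing is accessible and we trap here.
  if (max < start) throw std::runtime_error("memory access out of bounds");
  uint32_t sub = max - start;  // Int32Sub(max, start): largest valid size.
  bool fail = sub < *size;     // Uint32LessThan(sub, *size): partially OOB?
  if (fail) *size = sub;       // The Diamond/Phi selects the clamped size.
  return fail;                 // Caller traps on this bit after the bulk op.
}

int main() {
  uint32_t size = 6;
  bool partial = BoundsCheckRangeModel(/*start=*/65531, &size, /*max=*/65536);
  // partial == true and size == 5: the first five bytes are still written.
  return partial && size == 5 ? 0 : 1;
}

BoundsCheckMemRange then adds the (possibly clamped) start to the memory base and returns the same fail bit, which MemoryInit, MemoryCopy, and MemoryFill only turn into a trap after the partial operation has run.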
@@ -4377,10 +4372,11 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
                                    Node* src, Node* size,
                                    wasm::WasmCodePosition position) {
   CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
-  dst = BoundsCheckMemRange(dst, size, position);
-  MachineOperatorBuilder* m = mcgraph()->machine();
+  Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
+  auto m = mcgraph()->machine();
   Node* seg_index = Uint32Constant(data_segment_index);
+  Node* src_fail;
 
   {
     // Load segment size from WasmInstanceObject::data_segment_sizes.
@@ -4394,7 +4390,7 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
                    Effect(), Control()));
 
     // Bounds check the src index against the segment size.
-    BoundsCheckRange(src, size, seg_size, position);
+    src_fail = BoundsCheckRange(src, &size, seg_size, position);
   }
 
   {
@@ -4418,7 +4414,10 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
   MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
                              MachineType::Uint32()};
   MachineSignature sig(0, 3, sig_types);
-  return BuildCCall(&sig, function, dst, src, size);
+  BuildCCall(&sig, function, dst, src, size);
+  return TrapIfTrue(wasm::kTrapMemOutOfBounds,
+                    graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
+                    position);
 }
 
 Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
@@ -4435,25 +4434,51 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
 Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
                                    wasm::WasmCodePosition position) {
-  dst = BoundsCheckMemRange(dst, size, position);
-  src = BoundsCheckMemRange(src, size, position);
+  auto m = mcgraph()->machine();
+  // The data must be copied backward if the regions overlap and src < dst. The
+  // regions overlap if {src + size > dst && dst + size > src}. Since we already
+  // test that {src < dst}, we know that {dst + size > src}, so this simplifies
+  // to just {src + size > dst}. That sum can overflow, but if we subtract
+  // {size} from both sides of the inequality we get the equivalent test
+  // {size > dst - src}.
+  Node* copy_backward = graph()->NewNode(
+      m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst),
+      graph()->NewNode(m->Uint32LessThan(),
+                       graph()->NewNode(m->Int32Sub(), dst, src), size));
+  Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
+  // Trap without copying any bytes if we are copying backward and the copy is
+  // partially out-of-bounds. We only need to check that the dst region is
+  // out-of-bounds, because we know that {src < dst}, so the src region is
+  // always out of bounds if the dst region is.
+  TrapIfTrue(wasm::kTrapMemOutOfBounds,
+             graph()->NewNode(m->Word32And(), dst_fail, copy_backward),
+             position);
+  Node* src_fail = BoundsCheckMemRange(&src, &size, position);
 
   Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
       ExternalReference::wasm_memory_copy()));
   MachineType sig_types[] = {MachineType::Pointer(), MachineType::Pointer(),
                              MachineType::Uint32()};
   MachineSignature sig(0, 3, sig_types);
-  return BuildCCall(&sig, function, dst, src, size);
+  BuildCCall(&sig, function, dst, src, size);
+  return TrapIfTrue(wasm::kTrapMemOutOfBounds,
+                    graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
+                    position);
 }
 
 Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
                                    wasm::WasmCodePosition position) {
-  dst = BoundsCheckMemRange(dst, size, position);
+  Node* fail = BoundsCheckMemRange(&dst, &size, position);
   Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
       ExternalReference::wasm_memory_fill()));
   MachineType sig_types[] = {MachineType::Pointer(), MachineType::Uint32(),
                              MachineType::Uint32()};
   MachineSignature sig(0, 3, sig_types);
-  return BuildCCall(&sig, function, dst, value, size);
+  BuildCCall(&sig, function, dst, value, size);
+  return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
 }
 
 Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
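One note on the overlap comment in MemoryCopy above: the rewritten test {size > dst - src} follows from subtracting {src} (not {size}) from both sides of {src + size > dst}, which is valid because {src < dst} is already known, so {dst - src} cannot underflow. A minimal standalone check of that equivalence (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// For src < dst, "regions overlap" means src + size > dst. Computing the sum
// in 64 bits avoids 32-bit wrap-around; the rewritten form stays in 32 bits.
bool OverlapsWideningSum(uint32_t dst, uint32_t src, uint32_t size) {
  return uint64_t{src} + size > dst;
}

bool OverlapsNoOverflow(uint32_t dst, uint32_t src, uint32_t size) {
  return dst - src < size;  // Equivalent when src < dst.
}

int main() {
  // A case where src + size would wrap around in 32 bits.
  uint32_t dst = 0xFFFFFFF0u, src = 0x10u, size = 0xFFFFFFF0u;
  assert(OverlapsWideningSum(dst, src, size) == OverlapsNoOverflow(dst, src, size));
  // A non-overlapping case: src=0, dst=100, size=50.
  assert(!OverlapsNoOverflow(100, 0, 50));
  // And an overlapping one: src=0, dst=100, size=150.
  assert(OverlapsNoOverflow(100, 0, 150));
  return 0;
}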
@@ -441,11 +441,16 @@ class WasmGraphBuilder {
   Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
                        wasm::WasmCodePosition, EnforceBoundsCheck);
   // Check that the range [start, start + size) is in the range [0, max).
-  void BoundsCheckRange(Node* start, Node* size, Node* max,
-                        wasm::WasmCodePosition);
-  // BoundsCheckMemRange receives a uint32 {start} and {size} and returns
-  // a pointer into memory at that index, if it is in bounds.
-  Node* BoundsCheckMemRange(Node* start, Node* size, wasm::WasmCodePosition);
+  // Also updates *size with the valid range. Returns true if the range is
+  // partially out-of-bounds, traps if it is completely out-of-bounds.
+  Node* BoundsCheckRange(Node* start, Node** size, Node* max,
+                         wasm::WasmCodePosition);
+  // BoundsCheckMemRange receives a uint32 {start} and {size}, and checks if it
+  // is in bounds. Also updates *size with the valid range, and converts *start
+  // to a pointer into memory at that index. Returns true if the range is
+  // partially out-of-bounds, traps if it is completely out-of-bounds.
+  Node* BoundsCheckMemRange(Node** start, Node** size, wasm::WasmCodePosition);
   Node* CheckBoundsAndAlignment(uint8_t access_size, Node* index,
                                 uint32_t offset, wasm::WasmCodePosition);
@@ -74,6 +74,21 @@ inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) {
   return length <= max && index <= (max - length);
 }
 
+// Checks if [index, index+length) is in range [0, max). If not, {length} is
+// clamped to its valid range. Note that this check works even if
+// {index+length} would wrap around.
+template <typename T>
+inline bool ClampToBounds(T index, T* length, T max) {
+  if (index > max) {
+    *length = 0;
+    return false;
+  }
+  T avail = max - index;
+  bool oob = *length > avail;
+  if (oob) *length = avail;
+  return !oob;
+}
+
 // X must be a power of 2. Returns the number of trailing zeros.
 template <typename T,
           typename = typename std::enable_if<std::is_integral<T>::value>::type>
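A quick usage sketch of the new helper, with values chosen to mirror the mjsunit tests below. The helper body is repeated here only so the example compiles on its own; it is not an additional V8 declaration.

#include <cassert>
#include <cstdint>

template <typename T>
inline bool ClampToBounds(T index, T* length, T max) {
  if (index > max) {
    *length = 0;
    return false;
  }
  T avail = max - index;
  bool oob = *length > avail;
  if (oob) *length = avail;
  return !oob;
}

int main() {
  constexpr uint32_t kPageSize = 0x10000;
  uint32_t len;

  len = 6;  // [kPageSize - 5, kPageSize + 1): clamped to the last 5 bytes.
  assert(!ClampToBounds<uint32_t>(kPageSize - 5, &len, kPageSize) && len == 5);

  len = 0;  // Zero length exactly at the end of memory is in bounds.
  assert(ClampToBounds<uint32_t>(kPageSize, &len, kPageSize) && len == 0);

  len = 0;  // Zero length one past the end is still out of bounds.
  assert(!ClampToBounds<uint32_t>(kPageSize + 1, &len, kPageSize) && len == 0);
  return 0;
}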
@@ -1481,11 +1481,12 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
   // TODO(wasm): Move this functionality into wasm-objects, since it is used
   // for both instantiation and in the implementation of the table.init
   // instruction.
-  if (!IsInBounds(dst, count, table_instance.table_size)) return false;
-  if (!IsInBounds(src, count, elem_segment.entries.size())) return false;
+  bool ok = ClampToBounds<size_t>(dst, &count, table_instance.table_size);
+  // Use & instead of && so the clamp is not short-circuited.
+  ok &= ClampToBounds<size_t>(src, &count, elem_segment.entries.size());
 
   const WasmModule* module = instance->module();
-  for (uint32_t i = 0; i < count; ++i) {
+  for (size_t i = 0; i < count; ++i) {
     uint32_t func_index = elem_segment.entries[src + i];
     int entry_index = static_cast<int>(dst + i);
@@ -1547,7 +1548,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
                                                        instance, func_index);
     }
   }
-  return true;
+  return ok;
 }
 
 void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
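The non-short-circuiting & matters here: even when the first clamp already reports failure, the second clamp must still run so that {count} ends up within both the table and the segment before the loop copies {count} entries. A minimal standalone illustration (not V8 code; the helper is repeated so it compiles alone):

#include <cassert>
#include <cstddef>

template <typename T>
bool ClampToBounds(T index, T* length, T max) {
  if (index > max) { *length = 0; return false; }
  T avail = max - index;
  bool oob = *length > avail;
  if (oob) *length = avail;
  return !oob;
}

int main() {
  size_t table_size = 10, segment_size = 3;
  size_t dst = 8, src = 2, count = 100;
  bool ok = ClampToBounds<size_t>(dst, &count, table_size);  // count: 100 -> 2, ok = false
  ok &= ClampToBounds<size_t>(src, &count, segment_size);    // still runs: count: 2 -> 1
  assert(!ok && count == 1);
  // With a short-circuiting &&, the second clamp would be skipped once ok is
  // false; count would stay 2 and the copy loop would read entries[3] of a
  // 3-entry segment.
  return 0;
}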
@@ -1442,9 +1442,9 @@ Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
 namespace {
 void CopyTableEntriesImpl(Handle<WasmInstanceObject> instance, uint32_t dst,
-                          uint32_t src, uint32_t count) {
+                          uint32_t src, uint32_t count, bool copy_backward) {
   DCHECK(IsInBounds(dst, count, instance->indirect_function_table_size()));
-  if (src < dst) {
+  if (copy_backward) {
     for (uint32_t i = count; i > 0; i--) {
       auto to_entry = IndirectFunctionTableEntry(instance, dst + i - 1);
       auto from_entry = IndirectFunctionTableEntry(instance, src + i - 1);
@@ -1471,14 +1471,21 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
   CHECK_EQ(0, table_src_index);
   CHECK_EQ(0, table_dst_index);
   auto max = instance->indirect_function_table_size();
-  if (!IsInBounds(dst, count, max)) return false;
-  if (!IsInBounds(src, count, max)) return false;
-  if (dst == src) return true;  // no-op
+  bool copy_backward = src < dst && dst - src < count;
+  bool ok = ClampToBounds(dst, &count, max);
+  // Use & instead of && so the clamp is not short-circuited.
+  ok &= ClampToBounds(src, &count, max);
+
+  // If performing a partial copy when copying backward, then the first access
+  // will be out-of-bounds, so no entries should be copied.
+  if (copy_backward && !ok) return ok;
+
+  if (dst == src || count == 0) return ok;  // no-op
 
   if (!instance->has_table_object()) {
     // No table object, only need to update this instance.
-    CopyTableEntriesImpl(instance, dst, src, count);
-    return true;
+    CopyTableEntriesImpl(instance, dst, src, count, copy_backward);
+    return ok;
   }
 
   Handle<WasmTableObject> table =
@@ -1491,12 +1498,12 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
         WasmInstanceObject::cast(
             dispatch_tables->get(i + kDispatchTableInstanceOffset)),
         isolate);
-    CopyTableEntriesImpl(target_instance, dst, src, count);
+    CopyTableEntriesImpl(target_instance, dst, src, count, copy_backward);
   }
 
   // Copy the function entries.
   Handle<FixedArray> functions(table->elements(), isolate);
-  if (src < dst) {
+  if (copy_backward) {
     for (uint32_t i = count; i > 0; i--) {
       functions->set(dst + i - 1, functions->get(src + i - 1));
     }
@@ -1505,7 +1512,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
       functions->set(dst + i, functions->get(src + i));
     }
   }
-  return true;
+  return ok;
 }
 
 // static
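The backward/partial interaction in CopyTableEntries can be worked through with the values used by TestTableCopyOobWrites below. This is a reduced decision-logic sketch, not V8 code; a false {ok} means the instruction still traps after whatever was written:

#include <cassert>
#include <cstdint>

template <typename T>
bool ClampToBounds(T index, T* length, T max) {
  if (index > max) { *length = 0; return false; }
  T avail = max - index;
  bool oob = *length > avail;
  if (oob) *length = avail;
  return !oob;
}

// Returns how many entries actually get written before the trap is reported.
uint32_t TableCopyModel(uint32_t dst, uint32_t src, uint32_t count,
                        uint32_t table_size, bool* ok) {
  bool copy_backward = src < dst && dst - src < count;
  *ok = ClampToBounds(dst, &count, table_size);
  *ok &= ClampToBounds(src, &count, table_size);
  // Partial backward copy: the first (highest) access is already OOB.
  if (copy_backward && !*ok) return 0;
  return count;
}

int main() {
  bool ok;
  // copy(3, 0, 99) on a 5-entry table: overlapping, src < dst, partially OOB.
  // The copy would run backward, so nothing is written (matches the test).
  assert(TableCopyModel(3, 0, 99, 5, &ok) == 0 && !ok);
  // copy(0, 1, 99): overlapping, dst < src, forward copy of the valid prefix.
  assert(TableCopyModel(0, 1, 99, 5, &ok) == 4 && !ok);
  return 0;
}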
@@ -71,8 +71,35 @@ function getMemoryInit(mem, segment_data) {
   memoryInit(0, 5, 5);
   assertBufferContents(u8a, [5, 6, 7, 8, 9, 0, 0, 0, 0, 0,
                              0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+  // Copy 0 bytes does nothing.
+  memoryInit(10, 1, 0);
+  assertBufferContents(u8a, [5, 6, 7, 8, 9, 0, 0, 0, 0, 0,
+                             0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+  // Copy 0 at end of memory region or data segment is OK.
+  memoryInit(kPageSize, 0, 0);
+  memoryInit(0, 10, 0);
 })();
 
+(function TestMemoryInitOutOfBoundsData() {
+  const mem = new WebAssembly.Memory({initial: 1});
+  const memoryInit = getMemoryInit(mem, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+  const u8a = new Uint8Array(mem.buffer);
+  const last5Bytes = new Uint8Array(mem.buffer, kPageSize - 5);
+
+  // Write all values up to the out-of-bounds write.
+  assertTraps(kTrapMemOutOfBounds, () => memoryInit(kPageSize - 5, 0, 6));
+  assertBufferContents(last5Bytes, [0, 1, 2, 3, 4]);
+
+  // Write all values up to the out-of-bounds read.
+  u8a.fill(0);
+  assertTraps(kTrapMemOutOfBounds, () => memoryInit(0, 5, 6));
+  assertBufferContents(u8a, [5, 6, 7, 8, 9]);
+})();
+
 (function TestMemoryInitOutOfBounds() {
   const mem = new WebAssembly.Memory({initial: 1});
   // Create a data segment that has a length of kPageSize.
@@ -91,6 +118,10 @@ function getMemoryInit(mem, segment_data) {
   assertTraps(kTrapMemOutOfBounds, () => memoryInit(1000, 0, kPageSize));
   assertTraps(kTrapMemOutOfBounds, () => memoryInit(kPageSize, 0, 1));
 
+  // Copy 0 out-of-bounds fails.
+  assertTraps(kTrapMemOutOfBounds, () => memoryInit(kPageSize + 1, 0, 0));
+  assertTraps(kTrapMemOutOfBounds, () => memoryInit(0, kPageSize + 1, 0));
+
   // Make sure bounds aren't checked with 32-bit wrapping.
   assertTraps(kTrapMemOutOfBounds, () => memoryInit(1, 1, -1));
@@ -206,6 +237,10 @@ function getMemoryCopy(mem) {
   memoryCopy(10, 1, 0);
   assertBufferContents(u8a, [0, 11, 22, 33, 44, 55, 66, 77, 0, 0,
                              11, 22, 33, 44, 55, 66, 77]);
+
+  // Copy 0 at end of memory region is OK.
+  memoryCopy(kPageSize, 0, 0);
+  memoryCopy(0, kPageSize, 0);
 })();
 
 (function TestMemoryCopyOverlapping() {
@@ -226,6 +261,36 @@ function getMemoryCopy(mem) {
   assertBufferContents(u8a, [10, 20, 30, 20, 30]);
 })();
 
+(function TestMemoryCopyOutOfBoundsData() {
+  const mem = new WebAssembly.Memory({initial: 1});
+  const memoryCopy = getMemoryCopy(mem);
+  const u8a = new Uint8Array(mem.buffer);
+  const first5Bytes = new Uint8Array(mem.buffer, 0, 5);
+  const last5Bytes = new Uint8Array(mem.buffer, kPageSize - 5);
+  u8a.set([11, 22, 33, 44, 55, 66, 77, 88]);
+
+  // Write all values up to the out-of-bounds access.
+  assertTraps(kTrapMemOutOfBounds, () => memoryCopy(kPageSize - 5, 0, 6));
+  assertBufferContents(last5Bytes, [11, 22, 33, 44, 55]);
+
+  // Copy overlapping with destination < source. Copy will happen forwards, up
+  // to the out-of-bounds access.
+  u8a.fill(0);
+  last5Bytes.set([11, 22, 33, 44, 55]);
+  assertTraps(
+      kTrapMemOutOfBounds, () => memoryCopy(0, kPageSize - 5, kPageSize));
+  assertBufferContents(first5Bytes, [11, 22, 33, 44, 55]);
+
+  // Copy overlapping with source < destination. Copy would happen backwards,
+  // but the first byte to copy is out-of-bounds, so no data should be written.
+  u8a.fill(0);
+  first5Bytes.set([11, 22, 33, 44, 55]);
+  assertTraps(
+      kTrapMemOutOfBounds, () => memoryCopy(kPageSize - 5, 0, kPageSize));
+  assertBufferContents(last5Bytes, [0, 0, 0, 0, 0]);
+})();
+
 (function TestMemoryCopyOutOfBounds() {
   const mem = new WebAssembly.Memory({initial: 1});
   const memoryCopy = getMemoryCopy(mem);
@@ -242,6 +307,10 @@ function getMemoryCopy(mem) {
   assertTraps(kTrapMemOutOfBounds, () => memoryCopy(1000, 0, kPageSize));
   assertTraps(kTrapMemOutOfBounds, () => memoryCopy(kPageSize, 0, 1));
 
+  // Copy 0 out-of-bounds fails.
+  assertTraps(kTrapMemOutOfBounds, () => memoryCopy(kPageSize + 1, 0, 0));
+  assertTraps(kTrapMemOutOfBounds, () => memoryCopy(0, kPageSize + 1, 0));
+
   // Make sure bounds aren't checked with 32-bit wrapping.
   assertTraps(kTrapMemOutOfBounds, () => memoryCopy(1, 1, -1));
@@ -282,6 +351,9 @@ function getMemoryFill(mem) {
   // Fill 0 bytes does nothing.
   memoryFill(4, 66, 0);
   assertBufferContents(u8a, [0, 33, 33, 33, 66, 66, 66, 66]);
+
+  // Fill 0 at end of memory region is OK.
+  memoryFill(kPageSize, 66, 0);
 })();
 
 (function TestMemoryFillValueWrapsToByte() {
@@ -295,6 +367,17 @@ function getMemoryFill(mem) {
   assertBufferContents(u8a, [expected, expected, expected]);
 })();
 
+(function TestMemoryFillOutOfBoundsData() {
+  const mem = new WebAssembly.Memory({initial: 1});
+  const memoryFill = getMemoryFill(mem);
+  const v = 123;
+
+  // Write all values up to the out-of-bound access.
+  assertTraps(kTrapMemOutOfBounds, () => memoryFill(kPageSize - 5, v, 999));
+  const u8a = new Uint8Array(mem.buffer, kPageSize - 6);
+  assertBufferContents(u8a, [0, 123, 123, 123, 123, 123]);
+})();
+
 (function TestMemoryFillOutOfBounds() {
   const mem = new WebAssembly.Memory({initial: 1});
   const memoryFill = getMemoryFill(mem);
@@ -307,6 +390,9 @@ function getMemoryFill(mem) {
   assertTraps(kTrapMemOutOfBounds, () => memoryFill(1000, v, kPageSize));
   assertTraps(kTrapMemOutOfBounds, () => memoryFill(kPageSize, v, 1));
 
+  // Fill 0 out-of-bounds fails.
+  assertTraps(kTrapMemOutOfBounds, () => memoryFill(kPageSize + 1, v, 0));
+
   // Make sure bounds aren't checked with 32-bit wrapping.
   assertTraps(kTrapMemOutOfBounds, () => memoryFill(1, v, -1));
@@ -24,7 +24,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   let instance = builder.instantiate();
   let copy = instance.exports.copy;
-  for (let i = 0; i < kTableSize; i++) {
+  for (let i = 0; i <= kTableSize; i++) {
     copy(0, 0, i); // nop
     copy(0, i, kTableSize - i);
     copy(i, 0, kTableSize - i);
@@ -147,6 +147,54 @@ function assertCall(call, ...elems) {
   assertCall(call, 1, 2, 2, 1, 2);
 })();
 
+(function TestTableCopyOobWrites() {
+  print(arguments.callee.name);
+  let builder = new WasmModuleBuilder();
+  let sig_v_iii = builder.addType(kSig_v_iii);
+  let kTableSize = 5;
+
+  builder.setTableBounds(kTableSize, kTableSize);
+
+  {
+    let o = addFunctions(builder, kTableSize);
+    builder.addElementSegment(0, false,
+                              [o.f0.index, o.f1.index, o.f2.index]);
+  }
+
+  builder.addFunction("copy", sig_v_iii)
+    .addBody([
+      kExprGetLocal, 0,
+      kExprGetLocal, 1,
+      kExprGetLocal, 2,
+      kNumericPrefix, kExprTableCopy, kTableZero, kTableZero])
+    .exportAs("copy");
+
+  builder.addExportOfKind("table", kExternalTable, 0);
+
+  let instance = builder.instantiate();
+  let table = instance.exports.table;
+  let f0 = table.get(0), f1 = table.get(1), f2 = table.get(2);
+  let copy = instance.exports.copy;
+
+  // Non-overlapping, src < dst.
+  assertThrows(() => copy(3, 0, 3));
+  assertTable(table, f0, f1, f2, f0, f1);
+
+  // Non-overlapping, dst < src.
+  assertThrows(() => copy(0, 4, 2));
+  assertTable(table, f1, f1, f2, f0, f1);
+
+  // Overlapping, src < dst. This is required to copy backward, but the first
+  // access will be out-of-bounds, so nothing changes.
+  assertThrows(() => copy(3, 0, 99));
+  assertTable(table, f1, f1, f2, f0, f1);
+
+  // Overlapping, dst < src.
+  assertThrows(() => copy(0, 1, 99));
+  assertTable(table, f1, f2, f0, f1, f1);
+})();
+
 (function TestTableCopyOob1() {
   print(arguments.callee.name);
@@ -12,12 +12,12 @@ function addFunction(builder, k) {
   return m;
 }
 
-function addFunctions(builder, count, exportf = false) {
+function addFunctions(builder, count) {
   let o = {};
   for (var i = 0; i < count; i++) {
     let name = `f${i}`;
     o[name] = addFunction(builder, i);
-    if (exportf) o[name].exportAs(name);
+    o[name].exportAs(name);
   }
   return o;
 }
@@ -36,7 +36,7 @@ function assertTable(obj, ...elems) {
   builder.setTableBounds(kTableSize, kTableSize);
 
   {
-    let o = addFunctions(builder, kTableSize, true);
+    let o = addFunctions(builder, kTableSize);
     builder.addPassiveElementSegment(
         [o.f0.index, o.f1.index, o.f2.index, o.f3.index, o.f4.index, null]);
   }
@@ -56,6 +56,11 @@ function assertTable(obj, ...elems) {
   assertTable(x.table, null, null, null, null, null);
 
+  // 0 count is ok in bounds, and at end of regions.
+  x.init0(0, 0, 0);
+  x.init0(kTableSize, 0, 0);
+  x.init0(0, kTableSize, 0);
+
   // test actual writes.
   x.init0(0, 0, 1);
   assertTable(x.table, x.f0, null, null, null, null);
@@ -105,6 +110,14 @@ function assertTable(obj, ...elems) {
   assertTable(x.table, null, null, null, null, null);
 
+  // Write all values up to the out-of-bounds write.
+  assertThrows(() => x.init0(3, 0, 3));
+  assertTable(x.table, null, null, null, x.f0, x.f1);
+
+  // Write all values up to the out-of-bounds read.
+  assertThrows(() => x.init0(0, 3, 3));
+  assertTable(x.table, x.f3, x.f4, null, x.f0, x.f1);
+
   // 0-count is oob.
   assertThrows(() => x.init0(kTableSize+1, 0, 0));
   assertThrows(() => x.init0(0, kTableSize+1, 0));