Commit 9e8cd78d authored by Andreas Haas, committed by Commit Bot

[wasm][bulk-memory] Change bounds checks behavior

This is necessary because the spec changed.

R=mstarzinger@chromium.org

Bug: v8:9865
Change-Id: Id8b4d85eafcf368d591666907036e6aa54664e63
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1921794
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65072}
parent b5bdf0a8
......@@ -4849,9 +4849,9 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
SetControl(size_null_if_false);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
Node* seg_index = Uint32Constant(data_segment_index);
Node* src_fail;
{
// Load segment size from WasmInstanceObject::data_segment_sizes.
......@@ -4865,7 +4865,8 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Effect(), Control()));
// Bounds check the src index against the segment size.
src_fail = BoundsCheckRange(src, &size, seg_size, position);
Node* src_fail = BoundsCheckRange(src, &size, seg_size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
}
{
......@@ -4890,8 +4891,6 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
Node* size_null_if_true =
graph()->NewNode(common->IfTrue(), size_null_branch);
......@@ -4928,20 +4927,12 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
Node* size_null_if_false =
graph()->NewNode(common->IfFalse(), size_null_branch);
SetControl(size_null_if_false);
// The data must be copied backward if src < dst.
Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
// Trap without copying any bytes if we are copying backward and the copy is
// partially out-of-bounds. We only need to check that the dst region is
// out-of-bounds, because we know that {src < dst}, so the src region is
// always out of bounds if the dst region is.
TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32And(), dst_fail, copy_backward),
position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
Node* src_fail = BoundsCheckMemRange(&src, &size, position);
TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_copy()));
......@@ -4949,8 +4940,6 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
TrapIfTrue(wasm::kTrapMemOutOfBounds,
graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
Node* size_null_if_true =
graph()->NewNode(common->IfTrue(), size_null_branch);
......
......@@ -1634,10 +1634,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
bool ok = base::ClampToBounds<size_t>(dst, &count,
table_object->entries().length());
// Use & instead of && so the clamp is not short-circuited.
ok &= base::ClampToBounds<size_t>(src, &count, elem_segment.entries.size());
if (!base::IsInBounds(dst, count, table_object->entries().length()) ||
!base::IsInBounds(src, count, elem_segment.entries.size())) {
return false;
}
const WasmModule* module = instance->module();
for (size_t i = 0; i < count; ++i) {
......@@ -1691,7 +1691,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
func_index);
}
}
return ok;
return true;
}
void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
......
......@@ -1803,17 +1803,18 @@ class ThreadImpl {
return true;
}
Address dst_addr;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
auto src_max =
instance_object_->data_segment_sizes()[imm.data_segment_index];
// Use & instead of && so the clamp is not short-circuited.
ok &= base::ClampToBounds(src, &size, src_max);
if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
!base::IsInBounds(src, size, src_max)) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
Address src_addr =
instance_object_->data_segment_starts()[imm.data_segment_index] +
src;
memory_copy_wrapper(dst_addr, src_addr, size);
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
return ok;
return true;
}
case kExprDataDrop: {
DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
......@@ -1834,20 +1835,15 @@ class ThreadImpl {
return true;
}
Address dst_addr;
bool copy_backward = src < dst;
bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
// Trap without copying any bytes if we are copying backward and the
// copy is partially out-of-bounds. We only need to check that the dst
// region is out-of-bounds, because we know that {src < dst}, so the src
// region is always out of bounds if the dst region is.
if (ok || !copy_backward) {
Address src_addr;
// Use & instead of && so the bounds check is not short-circuited.
ok &= BoundsCheckMemRange(src, &size, &src_addr);
memory_copy_wrapper(dst_addr, src_addr, size);
Address src_addr;
if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
!BoundsCheckMemRange(src, &size, &src_addr)) {
DoTrap(kTrapMemOutOfBounds, pc);
return false;
}
if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
return ok;
memory_copy_wrapper(dst_addr, src_addr, size);
return true;
}
case kExprMemoryFill: {
MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
......
......@@ -1444,17 +1444,14 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
uint32_t max_dst = static_cast<uint32_t>(table_dst->entries().length());
uint32_t max_src = static_cast<uint32_t>(table_src->entries().length());
bool copy_backward = src < dst;
bool ok = base::ClampToBounds(dst, &count, max_dst);
// Use & instead of && so the clamp is not short-circuited.
ok &= base::ClampToBounds(src, &count, max_src);
// If performing a partial copy when copying backward, then the first access
// will be out-of-bounds, so no entries should be copied.
if (copy_backward && !ok) return ok;
if (!base::IsInBounds(dst, count, max_dst) ||
!base::IsInBounds(src, count, max_src)) {
return false;
}
// no-op
if ((dst == src && table_dst_index == table_src_index) || count == 0) {
return ok;
return true;
}
for (uint32_t i = 0; i < count; ++i) {
......@@ -1463,7 +1460,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
auto value = WasmTableObject::Get(isolate, table_src, src_index);
WasmTableObject::Set(isolate, table_dst, dst_index, value);
}
return ok;
return true;
}
// static
......
......@@ -95,14 +95,13 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
const uint32_t last_5_bytes = kWasmPageSize - 5;
// Write all values up to the out-of-bounds write.
// Failing memory.init should not have any effect.
CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize - 5, 0, 6));
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 1, 2, 3, 4});
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
// Write all values up to the out-of-bounds read.
r.builder().BlankMemory();
CHECK_EQ(0xDEADBEEF, r.Call(0, 5, 6));
CheckMemoryEqualsFollowedByZeroes(&r.builder(), {5, 6, 7, 8, 9});
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
}
WASM_EXEC_TEST(MemoryInitOutOfBounds) {
......@@ -203,24 +202,19 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
const uint32_t last_5_bytes = kWasmPageSize - 5;
// Copy with source < destination. Copy would happen backwards,
// but the first byte to copy is out-of-bounds, so no data should be written.
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, 6));
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
// Copy overlapping with destination < source. Copy will happen forwards, up
// to the out-of-bounds access.
r.builder().BlankMemory();
memcpy(mem + last_5_bytes, data, 5);
CHECK_EQ(0xDEADBEEF, r.Call(0, last_5_bytes, kWasmPageSize));
CheckMemoryEquals(&r.builder(), 0, {11, 22, 33, 44, 55});
CheckMemoryEquals(&r.builder(), last_5_bytes, {11, 22, 33, 44, 55});
// Copy overlapping with source < destination. Copy would happen backwards,
// but the first byte to copy is out-of-bounds, so no data should be written.
r.builder().BlankMemory();
memcpy(mem, data, 5);
memcpy(mem + last_5_bytes, data, 5);
CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, kWasmPageSize));
CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
CheckMemoryEquals(&r.builder(), last_5_bytes, {11, 22, 33, 44, 55});
}
WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
......@@ -532,13 +526,12 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// Write all values up to the out-of-bounds write.
// Out-of-bounds table.init should not have any effect.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
CheckTableCall(isolate, table, &r, call_index, null, null, null, 0, 1);
CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// Write all values up to the out-of-bounds read.
r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
CheckTableCall(isolate, table, &r, call_index, 3, 4, null, 0, 1);
CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
// 0-count is never oob.
r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
......@@ -772,23 +765,18 @@ void TestTableCopyOobWrites(ExecutionTier execution_tier, int table_dst,
CheckTable(isolate, table, f0, f1, f2, f3, f4);
// Non-overlapping, src < dst. Because of src < dst, we copy backwards.
// Therefore the first access already traps, and the table is not changed.
// Failing table.copy should not have any effect.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
CheckTable(isolate, table, f0, f1, f2, f3, f4);
// Non-overlapping, dst < src.
r.CheckCallViaJS(0xDEADBEEF, 0, 4, 2);
CheckTable(isolate, table, f4, f1, f2, f3, f4);
CheckTable(isolate, table, f0, f1, f2, f3, f4);
// Overlapping, src < dst. This is required to copy backward, but the first
// access will be out-of-bounds, so nothing changes.
r.CheckCallViaJS(0xDEADBEEF, 3, 0, 99);
CheckTable(isolate, table, f4, f1, f2, f3, f4);
CheckTable(isolate, table, f0, f1, f2, f3, f4);
// Overlapping, dst < src.
r.CheckCallViaJS(0xDEADBEEF, 0, 1, 99);
CheckTable(isolate, table, f1, f2, f3, f4, f4);
CheckTable(isolate, table, f0, f1, f2, f3, f4);
}
WASM_EXEC_TEST(TableCopyOobWritesFrom0To0) {
......
......@@ -184,30 +184,6 @@ function getMemoryFill(mem) {
assertEquals(0, view[0]);
})();
(function TestLazyElementSegmentBoundsCheck() {
const table = new WebAssembly.Table({initial: 3, element: 'anyfunc'});
const builder = new WasmModuleBuilder();
builder.addImportedTable('m', 'table', 1);
const f = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 42]);
const tableIndex = 0;
const isGlobal = false;
builder.addElementSegment(tableIndex, 2, isGlobal, [f.index, f.index]);
builder.addElementSegment(tableIndex, 0, isGlobal, [f.index, f.index]);
assertEquals(null, table.get(0));
assertEquals(null, table.get(1));
assertEquals(null, table.get(2));
// Instantiation fails, but still modifies the table.
assertThrows(() => builder.instantiate({m: {table}}), WebAssembly.LinkError);
// The second segment is not initialized.
assertEquals(null, table.get(0));
assertEquals(null, table.get(1));
assertEquals(42, table.get(2)());
})();
(function TestLazyDataAndElementSegments() {
const table = new WebAssembly.Table({initial: 1, element: 'anyfunc'});
const memory = new WebAssembly.Memory({initial: 1});
......@@ -228,6 +204,5 @@ function getMemoryFill(mem) {
assertThrows(
() => builder.instantiate({m: {memory, table}}), WebAssembly.LinkError);
assertEquals(42, table.get(0)());
assertEquals(0, view[0]);
})();
......@@ -59,7 +59,7 @@ assertTable([1000, 1001, 1002, 1003, 1004]);
// Non-overlapping, dst < src.
resetTable();
assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 4, 2));
assertTable([1004, 1001, 1002, 1003, 1004]);
assertTable([1000, 1001, 1002, 1003, 1004]);
// Overlapping, src < dst. This is required to copy backward, but the first
// access will be out-of-bounds, so nothing changes.
......@@ -70,4 +70,4 @@ assertTable([1000, 1001, 1002, 1003, 1004]);
// Overlapping, dst < src.
resetTable();
assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 1, 99));
assertTable([1001, 1002, 1003, 1004, 1004]);
assertTable([1000, 1001, 1002, 1003, 1004]);
......@@ -23,10 +23,6 @@
'proposals/bulk-memory-operations/elem': [FAIL],
'proposals/bulk-memory-operations/data': [FAIL],
'proposals/bulk-memory-operations/table_init': [FAIL],
'proposals/bulk-memory-operations/memory_init': [FAIL],
'proposals/bulk-memory-operations/table_copy': [FAIL],
'proposals/bulk-memory-operations/memory_copy': [FAIL],
# TODO(thibaudm): Spec tests do not check multi-return functions correctly.
'proposals/multi-value/call': [FAIL],
'proposals/multi-value/if': [FAIL],
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment