Commit 9e8cd78d authored by Andreas Haas, committed by Commit Bot

[wasm][bulk-memory] Change bounds checks behavior

The bulk memory proposal changed the out-of-bounds semantics of memory.init,
memory.copy, table.init and table.copy: if any part of the source or
destination range is out of bounds, the instruction now traps without
modifying memory or table contents, instead of clamping the range and
performing a partial copy up to the bound.
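A minimal sketch of the behavioral difference (not the code in this CL; the
function MemoryInitSketch and its parameters are made up for illustration):
under the old semantics an out-of-bounds memory.init or memory.copy clamped
the count and copied the in-bounds prefix before trapping, whereas the new
semantics validate the whole range first and write nothing on failure.

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  // All-or-nothing semantics: validate both ranges up front, then copy.
  bool MemoryInitSketch(uint8_t* mem, size_t mem_size, const uint8_t* seg,
                        size_t seg_size, size_t dst, size_t src, size_t count) {
    // Overflow-safe checks for dst + count <= mem_size and src + count <= seg_size.
    bool dst_ok = count <= mem_size && dst <= mem_size - count;
    bool src_ok = count <= seg_size && src <= seg_size - count;
    if (!dst_ok || !src_ok) return false;  // Trap; memory stays untouched.
    std::memcpy(mem + dst, seg + src, count);
    return true;
  }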

R=mstarzinger@chromium.org

Bug: v8:9865
Change-Id: Id8b4d85eafcf368d591666907036e6aa54664e63
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1921794
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65072}
parent b5bdf0a8
@@ -4849,9 +4849,9 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
   SetControl(size_null_if_false);
   Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
+  TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
   Node* seg_index = Uint32Constant(data_segment_index);
-  Node* src_fail;
   {
     // Load segment size from WasmInstanceObject::data_segment_sizes.
@@ -4865,7 +4865,8 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
                   Effect(), Control()));
     // Bounds check the src index against the segment size.
-    src_fail = BoundsCheckRange(src, &size, seg_size, position);
+    Node* src_fail = BoundsCheckRange(src, &size, seg_size, position);
+    TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
   }
   {
@@ -4890,8 +4891,6 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
                              MachineType::Uint32()};
   MachineSignature sig(0, 3, sig_types);
   BuildCCall(&sig, function, dst, src, size);
-  TrapIfTrue(wasm::kTrapMemOutOfBounds,
-             graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
   Node* size_null_if_true =
       graph()->NewNode(common->IfTrue(), size_null_branch);
@@ -4928,20 +4927,12 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
   Node* size_null_if_false =
       graph()->NewNode(common->IfFalse(), size_null_branch);
   SetControl(size_null_if_false);
-  // The data must be copied backward if src < dst.
-  Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst);
   Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
-  // Trap without copying any bytes if we are copying backward and the copy is
-  // partially out-of-bounds. We only need to check that the dst region is
-  // out-of-bounds, because we know that {src < dst}, so the src region is
-  // always out of bounds if the dst region is.
-  TrapIfTrue(wasm::kTrapMemOutOfBounds,
-             graph()->NewNode(m->Word32And(), dst_fail, copy_backward),
-             position);
+  TrapIfTrue(wasm::kTrapMemOutOfBounds, dst_fail, position);
   Node* src_fail = BoundsCheckMemRange(&src, &size, position);
+  TrapIfTrue(wasm::kTrapMemOutOfBounds, src_fail, position);
   Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
       ExternalReference::wasm_memory_copy()));
@@ -4949,8 +4940,6 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
                              MachineType::Uint32()};
   MachineSignature sig(0, 3, sig_types);
   BuildCCall(&sig, function, dst, src, size);
-  TrapIfTrue(wasm::kTrapMemOutOfBounds,
-             graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
   Node* size_null_if_true =
       graph()->NewNode(common->IfTrue(), size_null_branch);
......
@@ -1634,10 +1634,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
   // TODO(wasm): Move this functionality into wasm-objects, since it is used
   // for both instantiation and in the implementation of the table.init
   // instruction.
-  bool ok = base::ClampToBounds<size_t>(dst, &count,
-                                        table_object->entries().length());
-  // Use & instead of && so the clamp is not short-circuited.
-  ok &= base::ClampToBounds<size_t>(src, &count, elem_segment.entries.size());
+  if (!base::IsInBounds(dst, count, table_object->entries().length()) ||
+      !base::IsInBounds(src, count, elem_segment.entries.size())) {
+    return false;
+  }
   const WasmModule* module = instance->module();
   for (size_t i = 0; i < count; ++i) {
@@ -1691,7 +1691,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
                                           func_index);
     }
   }
-  return ok;
+  return true;
 }

 void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
......
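A hedged sketch of the difference between the two base helpers swapped above;
the real implementations live in V8's base library and may differ in detail,
and the *Sketch names here are illustrative stand-ins. ClampToBounds shrinks
the size so the range fits and reports whether clamping happened (which is
what enabled the old partial copies), while IsInBounds is a pure predicate,
which is what the all-or-nothing semantics need.

  // Old-style helper: clamp *size so that [index, index + *size) fits in
  // [0, max), and return whether the original range was fully in bounds.
  template <typename T>
  bool ClampToBoundsSketch(T index, T* size, T max) {
    if (index > max) {
      *size = 0;
      return false;
    }
    T available = max - index;
    if (*size > available) {
      *size = available;  // Callers then performed a partial copy.
      return false;
    }
    return true;
  }

  // New-style helper: no clamping, just an overflow-safe range check
  // equivalent to index + size <= max.
  template <typename T>
  bool IsInBoundsSketch(T index, T size, T max) {
    return size <= max && index <= max - size;
  }

With a pure predicate, callers such as LoadElemSegmentImpl can bail out before
touching the table at all, which is the behavior the updated tests below expect.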
@@ -1803,17 +1803,18 @@ class ThreadImpl {
           return true;
         }
         Address dst_addr;
-        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
         auto src_max =
             instance_object_->data_segment_sizes()[imm.data_segment_index];
-        // Use & instead of && so the clamp is not short-circuited.
-        ok &= base::ClampToBounds(src, &size, src_max);
+        if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
+            !base::IsInBounds(src, size, src_max)) {
+          DoTrap(kTrapMemOutOfBounds, pc);
+          return false;
+        }
         Address src_addr =
             instance_object_->data_segment_starts()[imm.data_segment_index] +
             src;
         memory_copy_wrapper(dst_addr, src_addr, size);
-        if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
-        return ok;
+        return true;
       }
       case kExprDataDrop: {
         DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
@@ -1834,20 +1835,15 @@ class ThreadImpl {
           return true;
         }
         Address dst_addr;
-        bool copy_backward = src < dst;
-        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
-        // Trap without copying any bytes if we are copying backward and the
-        // copy is partially out-of-bounds. We only need to check that the dst
-        // region is out-of-bounds, because we know that {src < dst}, so the src
-        // region is always out of bounds if the dst region is.
-        if (ok || !copy_backward) {
-          Address src_addr;
-          // Use & instead of && so the bounds check is not short-circuited.
-          ok &= BoundsCheckMemRange(src, &size, &src_addr);
-          memory_copy_wrapper(dst_addr, src_addr, size);
-        }
-        if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
-        return ok;
+        Address src_addr;
+        if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
+            !BoundsCheckMemRange(src, &size, &src_addr)) {
+          DoTrap(kTrapMemOutOfBounds, pc);
+          return false;
+        }
+        memory_copy_wrapper(dst_addr, src_addr, size);
+        return true;
       }
       case kExprMemoryFill: {
         MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
......
@@ -1444,17 +1444,14 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
   uint32_t max_dst = static_cast<uint32_t>(table_dst->entries().length());
   uint32_t max_src = static_cast<uint32_t>(table_src->entries().length());
   bool copy_backward = src < dst;
-  bool ok = base::ClampToBounds(dst, &count, max_dst);
-  // Use & instead of && so the clamp is not short-circuited.
-  ok &= base::ClampToBounds(src, &count, max_src);
-  // If performing a partial copy when copying backward, then the first access
-  // will be out-of-bounds, so no entries should be copied.
-  if (copy_backward && !ok) return ok;
+  if (!base::IsInBounds(dst, count, max_dst) ||
+      !base::IsInBounds(src, count, max_src)) {
+    return false;
+  }

   // no-op
   if ((dst == src && table_dst_index == table_src_index) || count == 0) {
-    return ok;
+    return true;
   }

   for (uint32_t i = 0; i < count; ++i) {
@@ -1463,7 +1460,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
     auto value = WasmTableObject::Get(isolate, table_src, src_index);
     WasmTableObject::Set(isolate, table_dst, dst_index, value);
   }
-  return ok;
+  return true;
 }

 // static
......
@@ -95,14 +95,13 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
   const uint32_t last_5_bytes = kWasmPageSize - 5;

-  // Write all values up to the out-of-bounds write.
+  // Failing memory.init should not have any effect.
   CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize - 5, 0, 6));
-  CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 1, 2, 3, 4});
+  CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});

-  // Write all values up to the out-of-bounds read.
   r.builder().BlankMemory();
   CHECK_EQ(0xDEADBEEF, r.Call(0, 5, 6));
-  CheckMemoryEqualsFollowedByZeroes(&r.builder(), {5, 6, 7, 8, 9});
+  CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
 }

 WASM_EXEC_TEST(MemoryInitOutOfBounds) {
@@ -203,24 +202,19 @@ WASM_EXEC_TEST(MemoryCopyOutOfBoundsData) {
   const uint32_t last_5_bytes = kWasmPageSize - 5;

-  // Copy with source < destination. Copy would happen backwards,
-  // but the first byte to copy is out-of-bounds, so no data should be written.
+  CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
   CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, 6));
   CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});

-  // Copy overlapping with destination < source. Copy will happen forwards, up
-  // to the out-of-bounds access.
   r.builder().BlankMemory();
   memcpy(mem + last_5_bytes, data, 5);
   CHECK_EQ(0xDEADBEEF, r.Call(0, last_5_bytes, kWasmPageSize));
-  CheckMemoryEquals(&r.builder(), 0, {11, 22, 33, 44, 55});
+  CheckMemoryEquals(&r.builder(), last_5_bytes, {11, 22, 33, 44, 55});

-  // Copy overlapping with source < destination. Copy would happen backwards,
-  // but the first byte to copy is out-of-bounds, so no data should be written.
   r.builder().BlankMemory();
-  memcpy(mem, data, 5);
+  memcpy(mem + last_5_bytes, data, 5);
   CHECK_EQ(0xDEADBEEF, r.Call(last_5_bytes, 0, kWasmPageSize));
-  CheckMemoryEquals(&r.builder(), last_5_bytes, {0, 0, 0, 0, 0});
+  CheckMemoryEquals(&r.builder(), last_5_bytes, {11, 22, 33, 44, 55});
 }

 WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
@@ -532,13 +526,12 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
   CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);

-  // Write all values up to the out-of-bounds write.
+  // Out-of-bounds table.init should not have any effect.
   r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
-  CheckTableCall(isolate, table, &r, call_index, null, null, null, 0, 1);
+  CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);

-  // Write all values up to the out-of-bounds read.
   r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
-  CheckTableCall(isolate, table, &r, call_index, 3, 4, null, 0, 1);
+  CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);

   // 0-count is never oob.
   r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
@@ -772,23 +765,18 @@ void TestTableCopyOobWrites(ExecutionTier execution_tier, int table_dst,
   CheckTable(isolate, table, f0, f1, f2, f3, f4);

-  // Non-overlapping, src < dst. Because of src < dst, we copy backwards.
-  // Therefore the first access already traps, and the table is not changed.
+  // Failing table.copy should not have any effect.
   r.CheckCallViaJS(0xDEADBEEF, 3, 0, 3);
   CheckTable(isolate, table, f0, f1, f2, f3, f4);

-  // Non-overlapping, dst < src.
   r.CheckCallViaJS(0xDEADBEEF, 0, 4, 2);
-  CheckTable(isolate, table, f4, f1, f2, f3, f4);
+  CheckTable(isolate, table, f0, f1, f2, f3, f4);

-  // Overlapping, src < dst. This is required to copy backward, but the first
-  // access will be out-of-bounds, so nothing changes.
   r.CheckCallViaJS(0xDEADBEEF, 3, 0, 99);
-  CheckTable(isolate, table, f4, f1, f2, f3, f4);
+  CheckTable(isolate, table, f0, f1, f2, f3, f4);

-  // Overlapping, dst < src.
   r.CheckCallViaJS(0xDEADBEEF, 0, 1, 99);
-  CheckTable(isolate, table, f1, f2, f3, f4, f4);
+  CheckTable(isolate, table, f0, f1, f2, f3, f4);
 }

 WASM_EXEC_TEST(TableCopyOobWritesFrom0To0) {
......
@@ -184,30 +184,6 @@ function getMemoryFill(mem) {
   assertEquals(0, view[0]);
 })();

-(function TestLazyElementSegmentBoundsCheck() {
-  const table = new WebAssembly.Table({initial: 3, element: 'anyfunc'});
-  const builder = new WasmModuleBuilder();
-  builder.addImportedTable('m', 'table', 1);
-  const f = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 42]);
-  const tableIndex = 0;
-  const isGlobal = false;
-  builder.addElementSegment(tableIndex, 2, isGlobal, [f.index, f.index]);
-  builder.addElementSegment(tableIndex, 0, isGlobal, [f.index, f.index]);
-
-  assertEquals(null, table.get(0));
-  assertEquals(null, table.get(1));
-  assertEquals(null, table.get(2));
-
-  // Instantiation fails, but still modifies the table.
-  assertThrows(() => builder.instantiate({m: {table}}), WebAssembly.LinkError);
-
-  // The second segment is not initialized.
-  assertEquals(null, table.get(0));
-  assertEquals(null, table.get(1));
-  assertEquals(42, table.get(2)());
-})();
-
 (function TestLazyDataAndElementSegments() {
   const table = new WebAssembly.Table({initial: 1, element: 'anyfunc'});
   const memory = new WebAssembly.Memory({initial: 1});
@@ -228,6 +204,5 @@ function getMemoryFill(mem) {
   assertThrows(
       () => builder.instantiate({m: {memory, table}}), WebAssembly.LinkError);
-  assertEquals(42, table.get(0)());
   assertEquals(0, view[0]);
 })();
@@ -59,7 +59,7 @@ assertTable([1000, 1001, 1002, 1003, 1004]);
 // Non-overlapping, dst < src.
 resetTable();
 assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 4, 2));
-assertTable([1004, 1001, 1002, 1003, 1004]);
+assertTable([1000, 1001, 1002, 1003, 1004]);

 // Overlapping, src < dst. This is required to copy backward, but the first
 // access will be out-of-bounds, so nothing changes.
@@ -70,4 +70,4 @@ assertTable([1000, 1001, 1002, 1003, 1004]);
 // Overlapping, dst < src.
 resetTable();
 assertTraps(kTrapTableOutOfBounds, () => instance.exports.copy(0, 1, 99));
-assertTable([1001, 1002, 1003, 1004, 1004]);
+assertTable([1000, 1001, 1002, 1003, 1004]);
@@ -23,10 +23,6 @@
   'proposals/bulk-memory-operations/elem': [FAIL],
   'proposals/bulk-memory-operations/data': [FAIL],
-  'proposals/bulk-memory-operations/table_init': [FAIL],
-  'proposals/bulk-memory-operations/memory_init': [FAIL],
-  'proposals/bulk-memory-operations/table_copy': [FAIL],
-  'proposals/bulk-memory-operations/memory_copy': [FAIL],

   # TODO(thibaudm): Spec tests do not check multi-return functions correctly.
   'proposals/multi-value/call': [FAIL],
   'proposals/multi-value/if': [FAIL],
......