Commit 7a51fe24 authored by Emanuel Ziegler, committed by Commit Bot

[wasm][bulk-memory] Adjust bulk memory behavior to proposal phase 4

The following changes were introduced with the recent proposal update:
- An OOB access traps even when the length is 0
- Double drops of segments are allowed
- Dropped segments are treated as having size 0 (OOB error)
- Active segments are dropped right after initialization (see the sketch below)

R=ahaas@chromium.org

Change-Id: I4e9fc4d9212841c7d858585c672143f99287520d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1946355
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Emanuel Ziegler <ecmziegler@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65305}
parent ea79fb8c
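All four bullet points above reduce to one rule: a range is validated against the full capacity even when its length is 0, and a dropped (or active) segment presents a capacity of 0. A minimal standalone sketch of that rule; the helper and constant names are illustrative, not V8's:

    #include <cassert>
    #include <cstdint>

    // Phase-4 rule: [offset, offset + size) must fit within the capacity,
    // with no special case for size == 0. 64-bit math avoids wraparound.
    bool RangeIsValid(uint64_t offset, uint64_t size, uint64_t capacity) {
      return offset + size <= capacity;
    }

    int main() {
      constexpr uint64_t kDroppedSegmentSize = 0;  // dropped => capacity 0
      // memory.init of length 0 at offset 0 of a dropped segment: allowed.
      assert(RangeIsValid(0, 0, kDroppedSegmentSize));
      // Reading even one byte of a dropped segment: traps under phase 4.
      assert(!RangeIsValid(0, 1, kDroppedSegmentSize));
      // Zero-length access past the end of a one-page (65536 B) memory: traps.
      assert(!RangeIsValid(65536 + 1, 0, 65536));
    }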
This diff is collapsed.
@@ -370,12 +370,6 @@ class WasmGraphBuilder {
                     wasm::WasmCodePosition position);
   Node* AtomicFence();
 
-  // Returns a pointer to the dropped_data_segments array. Traps if the data
-  // segment is active or has been dropped.
-  Node* CheckDataSegmentIsPassiveAndNotDropped(uint32_t data_segment_index,
-                                               wasm::WasmCodePosition position);
-  Node* CheckElemSegmentIsPassiveAndNotDropped(uint32_t elem_segment_index,
-                                               wasm::WasmCodePosition position);
   Node* MemoryInit(uint32_t data_segment_index, Node* dst, Node* src,
                    Node* size, wasm::WasmCodePosition position);
   Node* MemoryCopy(Node* dst, Node* src, Node* size,
...
@@ -658,22 +658,23 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
     uint32_t size = segment.source.length();
 
     if (enabled_.has_bulk_memory()) {
-      if (size == 0) continue;
       // Passive segments are not copied during instantiation.
       if (!segment.active) continue;
 
       uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
       bool ok = base::ClampToBounds(
           dest_offset, &size, static_cast<uint32_t>(instance->memory_size()));
+      if (!ok) {
+        thrower_->RuntimeError("data segment is out of bounds");
+        return;
+      }
+      // No need to copy empty segments.
+      if (size == 0) continue;
       Address dest_addr =
           reinterpret_cast<Address>(instance->memory_start()) + dest_offset;
       Address src_addr = reinterpret_cast<Address>(wire_bytes.begin()) +
                          segment.source.offset();
       memory_copy_wrapper(dest_addr, src_addr, size);
-      if (!ok) {
-        thrower_->RuntimeError("data segment is out of bounds");
-        return;
-      }
     } else {
       DCHECK(segment.active);
       // Segments of size == 0 are just nops.
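The reordering here matters: the bounds check must reject the segment before any bytes are copied, whereas previously the clamped prefix was written first and the error raised afterwards. A sketch of the clamp-style check this hunk relies on, modeled on how base::ClampToBounds is used above (the exact implementation is assumed for illustration):

    #include <cstdint>

    // Returns false if [index, index + *size) does not fit within [0, max),
    // and clamps *size to the accessible remainder either way.
    bool ClampToBoundsSketch(uint32_t index, uint32_t* size, uint32_t max) {
      if (index > max) {
        *size = 0;
        return false;
      }
      uint32_t available = max - index;
      if (*size > available) {
        *size = available;
        return false;
      }
      return true;
    }

With the check moved before memory_copy_wrapper, a failing active segment now aborts instantiation with memory untouched, which is what the TestLazyDataSegmentBoundsCheck change in the mjsunit diff below verifies.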
@@ -1627,15 +1628,18 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
 bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
                          Handle<WasmTableObject> table_object,
-                         uint32_t table_index,
-                         const WasmElemSegment& elem_segment, uint32_t dst,
-                         uint32_t src, size_t count) {
-  if (count == 0) return true;
+                         uint32_t table_index, uint32_t segment_index,
+                         uint32_t dst, uint32_t src, size_t count) {
+  DCHECK_LT(segment_index, instance->module()->elem_segments.size());
+  auto& elem_segment = instance->module()->elem_segments[segment_index];
   // TODO(wasm): Move this functionality into wasm-objects, since it is used
   // for both instantiation and in the implementation of the table.init
   // instruction.
   if (!base::IsInBounds(dst, count, table_object->current_length()) ||
-      !base::IsInBounds(src, count, elem_segment.entries.size())) {
+      !base::IsInBounds(src, count,
+                        instance->dropped_elem_segments()[segment_index] == 0
+                            ? elem_segment.entries.size()
+                            : 0)) {
     return false;
   }
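The ternary folds "dropped implies size 0" directly into the source-side bounds check, so table.init no longer needs a separate dropped-segment trap. Schematically (an illustrative helper, not V8 code):

    #include <cstddef>
    #include <cstdint>

    // A dropped segment contributes a capacity of 0, so only src == 0 with
    // count == 0 passes the source-side check.
    bool SrcInBounds(uint32_t src, size_t count, bool dropped,
                     size_t entry_count) {
      size_t capacity = dropped ? 0 : entry_count;
      return static_cast<uint64_t>(src) + count <= capacity;
    }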
@@ -1695,7 +1699,9 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
 }
 
 void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
-  for (auto& elem_segment : module_->elem_segments) {
+  for (uint32_t segment_index = 0;
+       segment_index < module_->elem_segments.size(); ++segment_index) {
+    auto& elem_segment = instance->module()->elem_segments[segment_index];
     // Passive segments are not copied during instantiation.
     if (!elem_segment.active) continue;
@@ -1703,14 +1709,17 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
     uint32_t dst = EvalUint32InitExpr(instance, elem_segment.offset);
     uint32_t src = 0;
     size_t count = elem_segment.entries.size();
-    if (enabled_.has_bulk_memory() && count == 0) continue;
 
     bool success = LoadElemSegmentImpl(
         isolate_, instance,
         handle(WasmTableObject::cast(
                    instance->tables().get(elem_segment.table_index)),
               isolate_),
-        table_index, elem_segment, dst, src, count);
+        table_index, segment_index, dst, src, count);
+    // Set the active segments to being already dropped, since table.init on
+    // a dropped passive segment and an active segment have the same
+    // behavior.
+    instance->dropped_elem_segments()[segment_index] = 1;
     if (enabled_.has_bulk_memory()) {
       if (!success) {
         thrower_->RuntimeError("table initializer is out of bounds");
@@ -1751,12 +1760,11 @@ void InstanceBuilder::InitializeExceptions(
 bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
                      uint32_t table_index, uint32_t segment_index, uint32_t dst,
                      uint32_t src, uint32_t count) {
-  auto& elem_segment = instance->module()->elem_segments[segment_index];
   return LoadElemSegmentImpl(
       isolate, instance,
       handle(WasmTableObject::cast(instance->tables().get(table_index)),
              isolate),
-      table_index, elem_segment, dst, src, count);
+      table_index, segment_index, dst, src, count);
 }
 
 }  // namespace wasm
...
@@ -1725,24 +1725,6 @@ class ThreadImpl {
     return true;
   }
 
-  bool CheckDataSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
-    DCHECK_LT(index, module()->num_declared_data_segments);
-    if (instance_object_->dropped_data_segments()[index]) {
-      DoTrap(kTrapDataSegmentDropped, pc);
-      return false;
-    }
-    return true;
-  }
-
-  bool CheckElemSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
-    DCHECK_LT(index, module()->elem_segments.size());
-    if (instance_object_->dropped_elem_segments()[index]) {
-      DoTrap(kTrapElemSegmentDropped, pc);
-      return false;
-    }
-    return true;
-  }
-
   template <typename type, typename op_type>
   bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                              Address* address, pc_t pc, int* const len,
@@ -1790,18 +1772,13 @@ class ThreadImpl {
       return true;
     case kExprMemoryInit: {
       MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+      // The data segment index must be in bounds since it is required by
+      // validation.
       DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
       *len += imm.length;
       auto size = Pop().to<uint32_t>();
       auto src = Pop().to<uint32_t>();
       auto dst = Pop().to<uint32_t>();
-      if (size == 0) {
-        return true;
-      }
-      if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
-                                                  pc)) {
-        return false;
-      }
       Address dst_addr;
       auto src_max =
           instance_object_->data_segment_sizes()[imm.data_segment_index];
@@ -1818,11 +1795,11 @@ class ThreadImpl {
     }
     case kExprDataDrop: {
       DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+      // The data segment index must be in bounds since it is required by
+      // validation.
+      DCHECK_LT(imm.index, module()->num_declared_data_segments);
       *len += imm.length;
-      if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
-        return false;
-      }
-      instance_object_->dropped_data_segments()[imm.index] = 1;
+      instance_object_->data_segment_sizes()[imm.index] = 0;
       return true;
     }
     case kExprMemoryCopy: {
@@ -1831,9 +1808,6 @@ class ThreadImpl {
       auto size = Pop().to<uint32_t>();
      auto src = Pop().to<uint32_t>();
       auto dst = Pop().to<uint32_t>();
-      if (size == 0) {
-        return true;
-      }
       Address dst_addr;
       Address src_addr;
       if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
@@ -1852,9 +1826,6 @@ class ThreadImpl {
       auto size = Pop().to<uint32_t>();
       auto value = Pop().to<uint32_t>();
       auto dst = Pop().to<uint32_t>();
-      if (size == 0) {
-        return true;
-      }
       Address dst_addr;
       bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
       if (!ok) {
@@ -1870,13 +1841,6 @@ class ThreadImpl {
       auto size = Pop().to<uint32_t>();
       auto src = Pop().to<uint32_t>();
       auto dst = Pop().to<uint32_t>();
-      if (size == 0) {
-        return true;
-      }
-      if (!CheckElemSegmentIsPassiveAndNotDropped(imm.elem_segment_index,
-                                                  pc)) {
-        return false;
-      }
       HandleScope scope(isolate_);  // Avoid leaking handles.
       bool ok = WasmInstanceObject::InitTableEntries(
           instance_object_->GetIsolate(), instance_object_, imm.table.index,
@@ -1887,9 +1851,6 @@ class ThreadImpl {
     case kExprElemDrop: {
       ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
       *len += imm.length;
-      if (!CheckElemSegmentIsPassiveAndNotDropped(imm.index, pc)) {
-        return false;
-      }
       instance_object_->dropped_elem_segments()[imm.index] = 1;
       return true;
     }
...
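The net effect in the interpreter: the explicit passive/dropped checks and the size == 0 early-outs are gone, data.drop degenerates to zeroing the recorded segment size, and the ordinary bounds checks cover everything else. A toy model of that state transition (simplified types, not V8's):

    #include <cstdint>
    #include <vector>

    // After this change, data.drop only zeroes the recorded size, so a
    // second drop is a no-op rather than a trap, and any nonzero-length
    // memory.init afterwards fails the normal bounds check.
    struct InstanceModel {
      std::vector<uint32_t> data_segment_sizes;
      void DataDrop(uint32_t index) { data_segment_sizes[index] = 0; }
    };

    int main() {
      InstanceModel instance{{42}};
      instance.DataDrop(0);
      instance.DataDrop(0);  // Double drop: no trap, size stays 0.
    }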
@@ -223,8 +223,6 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_starts, Address*,
                     kDataSegmentStartsOffset)
 PRIMITIVE_ACCESSORS(WasmInstanceObject, data_segment_sizes, uint32_t*,
                     kDataSegmentSizesOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_data_segments, byte*,
-                    kDroppedDataSegmentsOffset)
 PRIMITIVE_ACCESSORS(WasmInstanceObject, dropped_elem_segments, byte*,
                     kDroppedElemSegmentsOffset)
...
@@ -70,8 +70,6 @@ class WasmInstanceNativeAllocations {
         std::make_unique<Address[]>(num_data_segments));
     SET(instance, data_segment_sizes,
         std::make_unique<uint32_t[]>(num_data_segments));
-    SET(instance, dropped_data_segments,
-        std::make_unique<uint8_t[]>(num_data_segments));
     SET(instance, dropped_elem_segments,
         std::make_unique<uint8_t[]>(num_elem_segments));
   }
@@ -121,7 +119,6 @@ class WasmInstanceNativeAllocations {
   std::unique_ptr<Address[]> imported_mutable_globals_;
   std::unique_ptr<Address[]> data_segment_starts_;
   std::unique_ptr<uint32_t[]> data_segment_sizes_;
-  std::unique_ptr<uint8_t[]> dropped_data_segments_;
   std::unique_ptr<uint8_t[]> dropped_elem_segments_;
 #undef SET
 };
@@ -1379,17 +1376,16 @@ void WasmInstanceObject::InitDataSegmentArrays(
              num_data_segments == module->data_segments.size());
   for (size_t i = 0; i < num_data_segments; ++i) {
     const wasm::WasmDataSegment& segment = module->data_segments[i];
-    // Set the active segments to being already dropped, since memory.init on
-    // a dropped passive segment and an active segment have the same
-    // behavior.
-    instance->dropped_data_segments()[i] = segment.active ? 1 : 0;
     // Initialize the pointer and size of passive segments.
     auto source_bytes = wire_bytes.SubVector(segment.source.offset(),
                                              segment.source.end_offset());
     instance->data_segment_starts()[i] =
         reinterpret_cast<Address>(source_bytes.begin());
-    instance->data_segment_sizes()[i] = source_bytes.length();
+    // Set the active segments to being already dropped, since memory.init on
+    // a dropped passive segment and an active segment have the same
+    // behavior.
+    instance->data_segment_sizes()[i] =
+        segment.active ? 0 : source_bytes.length();
   }
 }
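With this hunk the dropped_data_segments array disappears entirely: recording a size of 0 for active segments makes memory.init on them indistinguishable from memory.init on a dropped passive segment. As a sketch (the helper name is ours, not V8's):

    #include <cstdint>

    // Active segments record a runtime size of 0; passive segments keep
    // their wire-bytes length until data.drop zeroes it.
    uint32_t InitialRuntimeSegmentSize(bool active, uint32_t source_length) {
      return active ? 0u : source_length;
    }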
@@ -1399,11 +1395,7 @@ void WasmInstanceObject::InitElemSegmentArrays(
   auto module = module_object->module();
   auto num_elem_segments = module->elem_segments.size();
   for (size_t i = 0; i < num_elem_segments; ++i) {
-    const wasm::WasmElemSegment& segment = module->elem_segments[i];
-    // Set the active segments to being already dropped, since table.init on
-    // a dropped passive segment and an active segment have the same
-    // behavior.
-    instance->dropped_elem_segments()[i] = segment.active ? 1 : 0;
+    instance->dropped_elem_segments()[i] = 0;
   }
 }
@@ -1435,8 +1427,6 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
                                           uint32_t table_src_index,
                                           uint32_t dst, uint32_t src,
                                           uint32_t count) {
-  // Copying 0 elements is a no-op.
-  if (count == 0) return true;
   CHECK_LT(table_dst_index, instance->tables().length());
   CHECK_LT(table_src_index, instance->tables().length());
   auto table_dst = handle(
@@ -1471,8 +1461,6 @@ bool WasmInstanceObject::InitTableEntries(Isolate* isolate,
                                           uint32_t table_index,
                                           uint32_t segment_index, uint32_t dst,
                                           uint32_t src, uint32_t count) {
-  // Copying 0 elements is a no-op.
-  if (count == 0) return true;
   // Note that this implementation just calls through to module instantiation.
   // This is intentional, so that the runtime only depends on the object
   // methods, and not the module instantiation logic.
...
@@ -412,7 +412,6 @@ class WasmInstanceObject : public JSObject {
   DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
   DECL_PRIMITIVE_ACCESSORS(data_segment_starts, Address*)
   DECL_PRIMITIVE_ACCESSORS(data_segment_sizes, uint32_t*)
-  DECL_PRIMITIVE_ACCESSORS(dropped_data_segments, byte*)
   DECL_PRIMITIVE_ACCESSORS(dropped_elem_segments, byte*)
 
   // Clear uninitialized padding space. This ensures that the snapshot content
@@ -459,7 +458,6 @@ class WasmInstanceObject : public JSObject {
   V(kRealStackLimitAddressOffset, kSystemPointerSize) \
   V(kDataSegmentStartsOffset, kSystemPointerSize) \
   V(kDataSegmentSizesOffset, kSystemPointerSize) \
-  V(kDroppedDataSegmentsOffset, kSystemPointerSize) \
   V(kDroppedElemSegmentsOffset, kSystemPointerSize) \
   V(kHeaderSize, 0)
...
@@ -128,9 +128,9 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
   CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
   CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
 
-  // Copy 0 out-of-bounds succeeds.
-  CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
-  CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
+  // Copy 0 out-of-bounds fails if target is invalid.
+  CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
+  CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
 
   // Make sure bounds aren't checked with 32-bit wrapping.
   CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -239,9 +239,9 @@ WASM_EXEC_TEST(MemoryCopyOutOfBounds) {
   CHECK_EQ(0xDEADBEEF, r.Call(1000, 0, kWasmPageSize));
   CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, 0, 1));
 
-  // Copy 0 out-of-bounds always succeeds.
-  CHECK_EQ(0, r.Call(kWasmPageSize + 1, 0, 0));
-  CHECK_EQ(0, r.Call(0, kWasmPageSize + 1, 0));
+  // Copy 0 out-of-bounds fails if target is invalid.
+  CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, 0, 0));
+  CHECK_EQ(0xDEADBEEF, r.Call(0, kWasmPageSize + 1, 0));
 
   // Make sure bounds aren't checked with 32-bit wrapping.
   CHECK_EQ(0xDEADBEEF, r.Call(1, 1, 0xFFFFFFFF));
@@ -314,8 +314,8 @@ WASM_EXEC_TEST(MemoryFillOutOfBounds) {
   CHECK_EQ(0xDEADBEEF, r.Call(1000, v, kWasmPageSize));
   CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize, v, 1));
 
-  // Fill 0 out-of-bounds succeeds.
-  CHECK_EQ(0, r.Call(kWasmPageSize + 1, v, 0));
+  // Fill 0 out-of-bounds still fails.
+  CHECK_EQ(0xDEADBEEF, r.Call(kWasmPageSize + 1, v, 0));
 
   // Make sure bounds aren't checked with 32-bit wrapping.
   CHECK_EQ(0xDEADBEEF, r.Call(1, v, 0xFFFFFFFF));
@@ -330,7 +330,7 @@ WASM_EXEC_TEST(DataDropTwice) {
   BUILD(r, WASM_DATA_DROP(0), kExprI32Const, 0);
   CHECK_EQ(0, r.Call());
-  CHECK_EQ(0xDEADBEEF, r.Call());
+  CHECK_EQ(0, r.Call());
 }
 
 WASM_EXEC_TEST(DataDropThenMemoryInit) {
@@ -533,9 +533,9 @@ void TestTableInitOob(ExecutionTier execution_tier, int table_index) {
   r.CheckCallViaJS(0xDEADBEEF, 0, 3, 3);
   CheckTableCall(isolate, table, &r, call_index, null, null, null, null, null);
 
-  // 0-count is never oob.
-  r.CheckCallViaJS(0, kTableSize + 1, 0, 0);
-  r.CheckCallViaJS(0, 0, kTableSize + 1, 0);
+  // 0-count is still oob if target is invalid.
+  r.CheckCallViaJS(0xDEADBEEF, kTableSize + 1, 0, 0);
+  r.CheckCallViaJS(0xDEADBEEF, 0, kTableSize + 1, 0);
 
   r.CheckCallViaJS(0xDEADBEEF, 0, 0, 6);
   r.CheckCallViaJS(0xDEADBEEF, 0, 1, 5);
@@ -821,8 +821,8 @@ void TestTableCopyOob1(ExecutionTier execution_tier, int table_dst,
   {
     const uint32_t big = 1000000;
-    r.CheckCallViaJS(0, big, 0, 0);
-    r.CheckCallViaJS(0, 0, big, 0);
+    r.CheckCallViaJS(0xDEADBEEF, big, 0, 0);
+    r.CheckCallViaJS(0xDEADBEEF, 0, big, 0);
   }
 
   for (uint32_t big = 4294967295; big > 1000; big >>= 1) {
@@ -865,7 +865,7 @@ WASM_EXEC_TEST(ElemDropTwice) {
   BUILD(r, WASM_ELEM_DROP(0), kExprI32Const, 0);
   r.CheckCallViaJS(0);
-  r.CheckCallViaJS(0xDEADBEEF);
+  r.CheckCallViaJS(0);
 }
 
 WASM_EXEC_TEST(ElemDropThenTableInit) {
...
@@ -241,7 +241,6 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
   DCHECK_EQ(index, test_module_->data_segments.size());
   DCHECK_EQ(index, data_segment_starts_.size());
   DCHECK_EQ(index, data_segment_sizes_.size());
-  DCHECK_EQ(index, dropped_data_segments_.size());
 
   // Add a passive data segment. This isn't used by function compilation, but
   // it keeps the index in sync. The data segment's source will not be
@@ -268,12 +267,10 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
   }
   data_segment_starts_.push_back(new_data_address + old_data_size);
   data_segment_sizes_.push_back(bytes.length());
-  dropped_data_segments_.push_back(0);
 
   // The vector pointers may have moved, so update the instance object.
   instance_object_->set_data_segment_starts(data_segment_starts_.data());
   instance_object_->set_data_segment_sizes(data_segment_sizes_.data());
-  instance_object_->set_dropped_data_segments(dropped_data_segments_.data());
   return index;
 }
...
@@ -249,7 +249,6 @@ class TestingModuleBuilder {
   std::vector<byte> data_segment_data_;
   std::vector<Address> data_segment_starts_;
   std::vector<uint32_t> data_segment_sizes_;
-  std::vector<byte> dropped_data_segments_;
   std::vector<byte> dropped_elem_segments_;
 
   const WasmGlobal* AddGlobal(ValueType type);
...
@@ -79,11 +79,12 @@ function getMemoryInit(mem, segment_data) {
   // is a trap, not a validation error.
   const instance = builder.instantiate();
 
-  // Initialization succeeds, because the size is 0, which is always valid.
+  // Initialization succeeds, because the source address and size are 0,
+  // which is valid on a dropped segment.
   instance.exports.init(0);
 
-  // Initialization fails, because the size > 0 on a dropped segment.
-  assertTraps(kTrapDataSegmentDropped, () => instance.exports.init(1));
+  // Initialization fails, because the segment is implicitly dropped.
+  assertTraps(kTrapMemOutOfBounds, () => instance.exports.init(1));
 })();
 
 (function TestDataDropOnActiveSegment() {
@@ -99,7 +100,8 @@ function getMemoryInit(mem, segment_data) {
       .exportAs('drop');
 
   const instance = builder.instantiate();
-  assertTraps(kTrapDataSegmentDropped, () => instance.exports.drop());
+  // Dropping an active segment is equivalent to a double drop, which is allowed.
+  instance.exports.drop();
 })();
 
 function getMemoryCopy(mem) {
@@ -167,7 +169,9 @@ function getMemoryFill(mem) {
       .exportAs('drop');
 
   const instance = builder.instantiate();
-  assertTraps(kTrapElemSegmentDropped, () => instance.exports.drop());
+  // The segment was already dropped after initialization and is therefore
+  // no longer active.
+  instance.exports.drop();
 })();
 
 (function TestLazyDataSegmentBoundsCheck() {
@@ -180,10 +184,10 @@ function getMemoryFill(mem) {
   assertEquals(0, view[kPageSize - 1]);
 
-  // Instantiation fails, but still modifies memory.
+  // Instantiation fails; memory remains unmodified.
   assertThrows(() => builder.instantiate({m: {memory}}), WebAssembly.RuntimeError);
-  assertEquals(42, view[kPageSize - 1]);
+  assertEquals(0, view[kPageSize - 1]);
 
   // The second segment is not initialized.
   assertEquals(0, view[0]);
 })();
...
@@ -6,6 +6,7 @@
 [ALWAYS, {
   #TODO(ahaas): Add additional stack checks on mips.
   'skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
+
   # TODO(v8:9144): The MVP behavior when bounds-checking segments changed in
   # the bulk-memory proposal. Since we've enabled bulk-memory by default, we
   # need to update to use its testsuite.
@@ -13,24 +14,16 @@
   'binary-leb128': [FAIL],
   'elem': [FAIL],
   'data': [FAIL],
+
   # TODO(mstarzinger): Roll newest tests into "js-types" repository.
   'proposals/js-types/exports': [FAIL],
   'proposals/js-types/globals': [FAIL],
   'proposals/js-types/linking': [FAIL],
 
   # TODO(thibaudm): Spec tests do not check multi-return functions correctly.
   'proposals/multi-value/call': [FAIL],
   'proposals/multi-value/if': [FAIL],
   'proposals/multi-value/func': [FAIL],
-
-  # TODO(ahaas): Most recent changes to the bulk-memory proposal.
-  'proposals/bulk-memory-operations/memory_copy': [FAIL],
-  'proposals/bulk-memory-operations/table_copy': [FAIL],
-  'proposals/bulk-memory-operations/table_init': [FAIL],
-  'proposals/bulk-memory-operations/memory_init': [FAIL],
-  'proposals/bulk-memory-operations/memory_fill': [FAIL],
-  'proposals/bulk-memory-operations/elem': [FAIL],
-  'proposals/bulk-memory-operations/data': [FAIL],
-  'proposals/bulk-memory-operations/bulk': [FAIL],
 }],  # ALWAYS
 
 ['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', {
...