Commit 94f22de6 authored by Ben Smith, committed by Commit Bot

[wasm] Implement bulk memory in interpreter

This CL only provides the implementation of memory.{init,copy,fill} and
data.drop.

Bug: v8:8965
Change-Id: I439f2520bfee8f147e4b0d1d31f954aaad2e14ad
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1510575
Commit-Queue: Ben Smith <binji@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60253}
parent 74714df2
@@ -1540,6 +1540,13 @@ class ThreadImpl {
     sp_ = dest + arity;
   }
 
+  inline Address EffectiveAddress(uint32_t index) {
+    // Compute the effective address of the access, making sure to condition
+    // the index even in the in-bounds case.
+    return reinterpret_cast<Address>(instance_object_->memory_start()) +
+           (index & instance_object_->memory_mask());
+  }
+
   template <typename mtype>
   inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
     uint32_t effective_index = offset + index;
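Editorial note: EffectiveAddress masks the index even when the bounds check has already passed ("condition the index even in the in-bounds case"). A plausible reading, and it is an assumption since memory_mask() itself is not part of this diff, is that the mask is one less than a power of two no smaller than the accessible memory, so a masked index can never leave the backing allocation even if a bounds check is bypassed speculatively. A small standalone illustration of that property (made-up names, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

// Smallest power of two >= v (assumes 0 < v <= 2^31).
uint32_t RoundUpToPowerOfTwoSketch(uint32_t v) {
  uint32_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

int main() {
  // Hypothetical memory: 70000 accessible bytes, with the backing region
  // assumed to be padded to the next power of two.
  uint32_t memory_size = 70000;
  uint32_t padded_size = RoundUpToPowerOfTwoSketch(memory_size);  // 131072
  uint32_t memory_mask = padded_size - 1;                         // 0x1ffff

  // Masking keeps any index, even a wildly out-of-range one, inside the
  // padded region...
  for (uint32_t index : {0u, 69999u, 70000u, 0xffffffffu}) {
    assert((index & memory_mask) < padded_size);
  }
  // ...and leaves in-bounds indices unchanged.
  assert((69999u & memory_mask) == 69999u);
  return 0;
}
```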
@@ -1550,10 +1557,15 @@ class ThreadImpl {
                           instance_object_->memory_size())) {
       return kNullAddress;  // oob
     }
-    // Compute the effective address of the access, making sure to condition
-    // the index even in the in-bounds case.
-    return reinterpret_cast<Address>(instance_object_->memory_start()) +
-           (effective_index & instance_object_->memory_mask());
+    return EffectiveAddress(effective_index);
+  }
+
+  inline bool BoundsCheckMemRange(uint32_t index, uint32_t* size,
+                                  Address* out_address) {
+    bool ok = ClampToBounds(
+        index, size, static_cast<uint32_t>(instance_object_->memory_size()));
+    *out_address = EffectiveAddress(index);
+    return ok;
   }
 
   template <typename ctype, typename mtype>
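Editorial note: BoundsCheckMemRange and the opcode cases further below all follow the same clamp-then-trap pattern: shrink the requested range so it fits in the target region, perform the (possibly shortened) write, and only then trap if the original range did not fit. The real ClampToBounds is not shown in this diff, so the sketch below is an inferred reimplementation of the contract the call above appears to rely on, paired with a memory.fill-style consumer; names are made up and this is not V8 code.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Clamp [index, index + *size) to [0, max). Returns true iff the original
// range was already fully in bounds (i.e. no clamping happened).
bool ClampToBoundsSketch(uint32_t index, uint32_t* size, uint32_t max) {
  if (index > max) {
    *size = 0;
    return false;
  }
  uint32_t avail = max - index;
  if (*size > avail) {
    *size = avail;
    return false;
  }
  return true;
}

// memory.fill with "write what fits, then trap" behaviour, mirroring how the
// kExprMemoryFill case below consumes the clamped size and the ok flag.
bool MemoryFillSketch(std::vector<uint8_t>* mem, uint32_t dst, uint8_t value,
                      uint32_t size) {
  uint32_t mem_size = static_cast<uint32_t>(mem->size());
  bool ok = ClampToBoundsSketch(dst, &size, mem_size);
  if (size > 0) std::fill_n(mem->data() + dst, size, value);
  return ok;  // false: the caller raises a memory-out-of-bounds trap
}

int main() {
  std::vector<uint8_t> mem(8, 0);
  bool ok = MemoryFillSketch(&mem, 6, 0xAB, 4);  // only 2 of the 4 bytes fit
  std::printf("ok=%d mem[6]=%02x mem[7]=%02x\n", ok, mem[6], mem[7]);
  return 0;
}
```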
@@ -1609,6 +1621,15 @@ class ThreadImpl {
     return true;
   }
 
+  bool CheckDataSegmentIsPassiveAndNotDropped(uint32_t index, pc_t pc) {
+    DCHECK_LT(index, module()->num_declared_data_segments);
+    if (instance_object_->dropped_data_segments()[index]) {
+      DoTrap(kTrapDataSegmentDropped, pc);
+      return false;
+    }
+    return true;
+  }
+
   template <typename type, typename op_type>
   bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                              Address& address, pc_t pc, int& len,
@@ -1654,6 +1675,74 @@ class ThreadImpl {
       case kExprI64UConvertSatF64:
         Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
         return true;
+      case kExprMemoryInit: {
+        MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+        DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
+        len += imm.length;
+        if (!CheckDataSegmentIsPassiveAndNotDropped(imm.data_segment_index,
+                                                    pc)) {
+          return false;
+        }
+        auto size = Pop().to<uint32_t>();
+        auto src = Pop().to<uint32_t>();
+        auto dst = Pop().to<uint32_t>();
+        Address dst_addr;
+        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+        auto src_max =
+            instance_object_->data_segment_sizes()[imm.data_segment_index];
+        // Use & instead of && so the clamp is not short-circuited.
+        ok &= ClampToBounds(src, &size, src_max);
+        Address src_addr =
+            instance_object_->data_segment_starts()[imm.data_segment_index] +
+            src;
+        memory_copy_wrapper(dst_addr, src_addr, size);
+        if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+        return ok;
+      }
+      case kExprDataDrop: {
+        DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+        len += imm.length;
+        if (!CheckDataSegmentIsPassiveAndNotDropped(imm.index, pc)) {
+          return false;
+        }
+        instance_object_->dropped_data_segments()[imm.index] = 1;
+        return true;
+      }
+      case kExprMemoryCopy: {
+        MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
+        auto size = Pop().to<uint32_t>();
+        auto src = Pop().to<uint32_t>();
+        auto dst = Pop().to<uint32_t>();
+        Address dst_addr;
+        bool copy_backward = src < dst && dst - src < size;
+        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+        // Trap without copying any bytes if we are copying backward and the
+        // copy is partially out-of-bounds. We only need to check the dst
+        // region, because we know that {src < dst}, so the src region can
+        // only be out-of-bounds if the dst region is too.
+        if (ok || !copy_backward) {
+          Address src_addr;
+          // Use & instead of && so the bounds check is not short-circuited.
+          ok &= BoundsCheckMemRange(src, &size, &src_addr);
+          memory_copy_wrapper(dst_addr, src_addr, size);
+        }
+        if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+        len += imm.length;
+        return ok;
+      }
+      case kExprMemoryFill: {
+        MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
+                                                       code->at(pc + 1));
+        auto size = Pop().to<uint32_t>();
+        auto value = Pop().to<uint32_t>();
+        auto dst = Pop().to<uint32_t>();
+        Address dst_addr;
+        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
+        memory_fill_wrapper(dst_addr, value, size);
+        if (!ok) DoTrap(kTrapMemOutOfBounds, pc);
+        len += imm.length;
+        return ok;
+      }
       default:
         FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
               OpcodeName(code->start[pc]));
......
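Editorial note: the kExprMemoryCopy case above singles out overlapping copies whose destination starts inside the source region (src < dst && dst - src < size). Such copies must proceed from the highest byte down, so a partially out-of-bounds backward copy cannot simply write the in-bounds prefix and then trap; the interpreter traps up front instead. For the in-bounds path, memory_copy_wrapper is presumably memmove-like so that overlap is handled correctly. A standalone illustration of the overlap condition (not V8 code):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// True when [src, src+size) and [dst, dst+size) overlap such that the copy
// must run backward (highest byte first): dst starts inside the src region.
// Mirrors the condition used in the kExprMemoryCopy case.
bool NeedsBackwardCopy(uint32_t dst, uint32_t src, uint32_t size) {
  return src < dst && dst - src < size;
}

int main() {
  uint8_t mem[8] = {1, 2, 3, 4, 5, 6, 7, 8};

  // Copy 4 bytes from offset 0 to offset 2: the regions overlap and dst > src.
  uint32_t dst = 2, src = 0, size = 4;
  std::printf("backward? %d\n", NeedsBackwardCopy(dst, src, size));  // 1

  // memmove copies as if through a temporary buffer, so the overlap is safe;
  // a naive forward byte loop would read bytes it has already overwritten.
  std::memmove(mem + dst, mem + src, size);
  for (uint8_t b : mem) std::printf("%d ", b);  // 1 2 1 2 3 4 7 8
  std::printf("\n");
  return 0;
}
```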
@@ -260,6 +260,7 @@ v8_source_set("cctest_sources") {
     "wasm/test-run-wasm-asmjs.cc",
     "wasm/test-run-wasm-atomics.cc",
     "wasm/test-run-wasm-atomics64.cc",
+    "wasm/test-run-wasm-bulk-memory.cc",
     "wasm/test-run-wasm-exceptions.cc",
     "wasm/test-run-wasm-interpreter.cc",
     "wasm/test-run-wasm-js.cc",
......
@@ -476,6 +476,7 @@
   'test-run-wasm-asmjs/*': [SKIP],
   'test-run-wasm-atomics64/*': [SKIP],
   'test-run-wasm-atomics/*': [SKIP],
+  'test-run-wasm-bulk-memory/*': [SKIP],
   'test-run-wasm/*': [SKIP],
   'test-run-wasm-exceptions/*': [SKIP],
   'test-run-wasm-interpreter/*': [SKIP],
......
@@ -222,6 +222,47 @@ uint32_t TestingModuleBuilder::AddException(FunctionSig* sig) {
   return index;
 }
 
+uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
+  uint32_t index = static_cast<uint32_t>(test_module_->data_segments.size());
+  DCHECK_EQ(index, test_module_->data_segments.size());
+  DCHECK_EQ(index, data_segment_starts_.size());
+  DCHECK_EQ(index, data_segment_sizes_.size());
+  DCHECK_EQ(index, dropped_data_segments_.size());
+
+  // Add a passive data segment. This isn't used by function compilation, but
+  // it keeps the index in sync. The data segment's source will not be
+  // correct, since we don't store data in the module wire bytes.
+  test_module_->data_segments.emplace_back();
+
+  // The num_declared_data_segments (from the DataCount section) is used to
+  // validate the segment index during function compilation.
+  test_module_->num_declared_data_segments = index + 1;
+
+  Address old_data_address =
+      reinterpret_cast<Address>(data_segment_data_.data());
+  size_t old_data_size = data_segment_data_.size();
+  data_segment_data_.resize(old_data_size + bytes.length());
+  Address new_data_address =
+      reinterpret_cast<Address>(data_segment_data_.data());
+  memcpy(data_segment_data_.data() + old_data_size, bytes.start(),
+         bytes.length());
+
+  // The data_segment_data_ backing store may have moved, so update all the
+  // starts.
+  for (Address& start : data_segment_starts_) {
+    start += new_data_address - old_data_address;
+  }
+  data_segment_starts_.push_back(new_data_address + old_data_size);
+  data_segment_sizes_.push_back(bytes.length());
+  dropped_data_segments_.push_back(0);
+
+  // The vector pointers may have moved, so update the instance object.
+  instance_object_->set_data_segment_starts(data_segment_starts_.data());
+  instance_object_->set_data_segment_sizes(data_segment_sizes_.data());
+  instance_object_->set_dropped_data_segments(dropped_data_segments_.data());
+  return index;
+}
+
 CompilationEnv TestingModuleBuilder::CreateCompilationEnv() {
   return {
       test_module_ptr_,
......
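Editorial note: AddPassiveDataSegment keeps every segment payload in one growing data_segment_data_ vector and publishes raw pointers into that storage; whenever the vector reallocates, every previously recorded start must be shifted by the distance the buffer moved, which is what the loop over data_segment_starts_ does. A minimal standalone sketch of that bookkeeping (add_segment is a made-up stand-in, not the V8 API):

```cpp
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <vector>

int main() {
  std::vector<uint8_t> data;     // plays the role of data_segment_data_
  std::vector<uint8_t*> starts;  // plays the role of data_segment_starts_
  std::vector<uint32_t> sizes;   // plays the role of data_segment_sizes_

  auto add_segment = [&](std::initializer_list<uint8_t> bytes) {
    uint8_t* old_base = data.data();
    size_t old_size = data.size();
    data.insert(data.end(), bytes);  // may reallocate and move the buffer
    uint8_t* new_base = data.data();
    // Rebase previously recorded starts by however far the buffer moved,
    // mirroring the Address arithmetic in AddPassiveDataSegment.
    for (uint8_t*& start : starts) {
      start = new_base + (start - old_base);
    }
    starts.push_back(new_base + old_size);
    sizes.push_back(static_cast<uint32_t>(bytes.size()));
  };

  add_segment({1, 2, 3});
  add_segment({4, 5, 6, 7});  // likely forces a reallocation

  // Every start still points at the right payload after the rebase.
  for (size_t i = 0; i < starts.size(); ++i) {
    std::printf("segment %zu: first byte = %d, size = %u\n", i, starts[i][0],
                sizes[i]);
  }
  return 0;
}
```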
@@ -116,6 +116,8 @@ class TestingModuleBuilder {
     return static_cast<byte>(size - 1);
   }
 
+  uint32_t mem_size() { return mem_size_; }
+
   template <typename T>
   T* raw_mem_start() {
     DCHECK(mem_start_);
@@ -188,6 +190,8 @@ class TestingModuleBuilder {
   uint32_t AddException(FunctionSig* sig);
 
+  uint32_t AddPassiveDataSegment(Vector<const byte> bytes);
+
   WasmFunction* GetFunctionAt(int index) {
     return &test_module_->functions[index];
   }
@@ -232,6 +236,12 @@ class TestingModuleBuilder {
   RuntimeExceptionSupport runtime_exception_support_;
   LowerSimd lower_simd_;
 
+  // Data segment arrays that are normally allocated on the instance.
+  std::vector<byte> data_segment_data_;
+  std::vector<Address> data_segment_starts_;
+  std::vector<uint32_t> data_segment_sizes_;
+  std::vector<byte> dropped_data_segments_;
+
   const WasmGlobal* AddGlobal(ValueType type);
   Handle<WasmInstanceObject> InitInstanceObject();
......