Commit d31dff84 authored by Eric Holk, committed by Commit Bot

[wasm] always allocate memory when guard regions are needed

When using trap handlers, memory references do not get any checks inserted. This
means there is no check for a null memory as happens when the memory size is
0. Normally this would be correctly caught as an out of bounds access, since the
low memory addresses are not normally mapped. However, if they were mapped for
some reason, we would not catch the out of bounds access.

The fix is to ensure WebAssembly instances always have a guard region even if
the memory is size 0.

This is a rewrite of 5e76ff5a

Note that this can lead to a large amount of unnecessary address space usage,
so we share a single reservation for empty array buffers.

Bug: chromium:769637

Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Ia8e84be6d595e347d3d342959f2c374db1a3f683
Reviewed-on: https://chromium-review.googlesource.com/702657
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Eric Holk <eholk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52163}
parent 2589ea08
...@@ -57,7 +57,8 @@ void LocalArrayBufferTracker::Process(Callback callback) { ...@@ -57,7 +57,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
backing_stores_to_free->emplace_back( backing_stores_to_free->emplace_back(
allocation_base, old_buffer->allocation_length(), allocation_base, old_buffer->allocation_length(),
old_buffer->backing_store(), old_buffer->allocation_mode()); old_buffer->backing_store(), old_buffer->allocation_mode(),
old_buffer->is_wasm_memory());
it = array_buffers_.erase(it); it = array_buffers_.erase(it);
} else { } else {
UNREACHABLE(); UNREACHABLE();
......
...@@ -19051,8 +19051,9 @@ void JSArrayBuffer::FreeBackingStore() { ...@@ -19051,8 +19051,9 @@ void JSArrayBuffer::FreeBackingStore() {
if (allocation_base() == nullptr) { if (allocation_base() == nullptr) {
return; return;
} }
FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(), FreeBackingStore(GetIsolate(),
backing_store(), allocation_mode()}); {allocation_base(), allocation_length(), backing_store(),
allocation_mode(), is_wasm_memory()});
// Zero out the backing store and allocation base to avoid dangling // Zero out the backing store and allocation base to avoid dangling
// pointers. // pointers.
set_backing_store(nullptr); set_backing_store(nullptr);
...@@ -19061,12 +19062,17 @@ void JSArrayBuffer::FreeBackingStore() { ...@@ -19061,12 +19062,17 @@ void JSArrayBuffer::FreeBackingStore() {
// static // static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) { if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
bool needs_free = true;
if (allocation.is_wasm_memory) {
wasm::WasmMemoryTracker* memory_tracker = wasm::WasmMemoryTracker* memory_tracker =
isolate->wasm_engine()->memory_tracker(); isolate->wasm_engine()->memory_tracker();
if (memory_tracker->IsWasmMemory(allocation.backing_store)) { if (memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
memory_tracker->ReleaseAllocation(allocation.backing_store); needs_free = false;
} }
}
if (needs_free) {
CHECK(FreePages(allocation.allocation_base, allocation.length)); CHECK(FreePages(allocation.allocation_base, allocation.length));
}
} else { } else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base, isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length); allocation.length);
......
...@@ -115,9 +115,9 @@ ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const { ...@@ -115,9 +115,9 @@ ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
bool JSArrayBuffer::is_wasm_memory() const { bool JSArrayBuffer::is_wasm_memory() const {
bool const is_wasm_memory = IsWasmMemory::decode(bit_field()); bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
DCHECK(backing_store() == nullptr || DCHECK_EQ(is_wasm_memory,
GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory( GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
backing_store()) == is_wasm_memory); backing_store()));
return is_wasm_memory; return is_wasm_memory;
} }
......
...@@ -173,16 +173,18 @@ class JSArrayBuffer : public JSObject { ...@@ -173,16 +173,18 @@ class JSArrayBuffer : public JSObject {
using AllocationMode = ArrayBuffer::Allocator::AllocationMode; using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
Allocation(void* allocation_base, size_t length, void* backing_store, Allocation(void* allocation_base, size_t length, void* backing_store,
AllocationMode mode) AllocationMode mode, bool is_wasm_memory)
: allocation_base(allocation_base), : allocation_base(allocation_base),
length(length), length(length),
backing_store(backing_store), backing_store(backing_store),
mode(mode) {} mode(mode),
is_wasm_memory(is_wasm_memory) {}
void* allocation_base; void* allocation_base;
size_t length; size_t length;
void* backing_store; void* backing_store;
AllocationMode mode; AllocationMode mode;
bool is_wasm_memory;
}; };
// Returns whether the buffer is tracked by the WasmMemoryTracker. // Returns whether the buffer is tracked by the WasmMemoryTracker.
......
...@@ -978,10 +978,8 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) { ...@@ -978,10 +978,8 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
DCHECK(it.is_wasm()); DCHECK(it.is_wasm());
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame()); WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance() uint8_t* mem_start = reinterpret_cast<uint8_t*>(
->memory_object() frame->wasm_instance()->memory_object()->array_buffer()->backing_store());
->array_buffer()
->allocation_base());
int func_index = frame->function_index(); int func_index = frame->function_index();
int pos = frame->position(); int pos = frame->position();
// TODO(titzer): eliminate dependency on WasmModule definition here. // TODO(titzer): eliminate dependency on WasmModule definition here.
......
...@@ -1719,7 +1719,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() { ...@@ -1719,7 +1719,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
DCHECK_IMPLIES(use_trap_handler(), module_->is_asm_js() || DCHECK_IMPLIES(use_trap_handler(), module_->is_asm_js() ||
memory->is_wasm_memory() || memory->is_wasm_memory() ||
memory->backing_store() == nullptr); memory->backing_store() == nullptr);
} else if (initial_pages > 0) { } else if (initial_pages > 0 || use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
// even when the size is zero to prevent null-dereference issues
// (e.g. https://crbug.com/769637).
// Allocate memory if the initial size is more than 0 pages. // Allocate memory if the initial size is more than 0 pages.
memory_ = AllocateMemory(initial_pages); memory_ = AllocateMemory(initial_pages);
if (memory_.is_null()) return {}; // failed to allocate memory if (memory_.is_null()) return {}; // failed to allocate memory
......
...@@ -12,7 +12,18 @@ namespace v8 { ...@@ -12,7 +12,18 @@ namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, size_t size,
bool require_guard_regions,
void** allocation_base,
size_t* allocation_length);
WasmMemoryTracker::~WasmMemoryTracker() { WasmMemoryTracker::~WasmMemoryTracker() {
if (empty_backing_store_.allocation_base != nullptr) {
CHECK(FreePages(empty_backing_store_.allocation_base,
empty_backing_store_.allocation_length));
InternalReleaseAllocation(empty_backing_store_.buffer_start);
}
// All reserved address space should be released before the allocation tracker // All reserved address space should be released before the allocation tracker
// is destroyed. // is destroyed.
DCHECK_EQ(reserved_address_space_, 0u); DCHECK_EQ(reserved_address_space_, 0u);
...@@ -24,9 +35,9 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) { ...@@ -24,9 +35,9 @@ bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
// regions, which is currently only supported on 64-bit systems. On other // regions, which is currently only supported on 64-bit systems. On other
// platforms, we always fall back on bounds checks. // platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT #if V8_TARGET_ARCH_64_BIT
static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
#else #else
static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
#endif #endif
size_t const old_count = reserved_address_space_.fetch_add(num_bytes); size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
...@@ -65,6 +76,14 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base, ...@@ -65,6 +76,14 @@ void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation( WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
const void* buffer_start) { const void* buffer_start) {
if (IsEmptyBackingStore(buffer_start)) {
return AllocationData();
}
return InternalReleaseAllocation(buffer_start);
}
WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_); base::LockGuard<base::Mutex> scope_lock(&mutex_);
auto find_result = allocations_.find(buffer_start); auto find_result = allocations_.find(buffer_start);
...@@ -84,11 +103,6 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation( ...@@ -84,11 +103,6 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
UNREACHABLE(); UNREACHABLE();
} }
bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
return allocations_.find(buffer_start) != allocations_.end();
}
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData( const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
const void* buffer_start) { const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_); base::LockGuard<base::Mutex> scope_lock(&mutex_);
...@@ -99,7 +113,51 @@ const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData( ...@@ -99,7 +113,51 @@ const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
return nullptr; return nullptr;
} }
void* TryAllocateBackingStore(Isolate* isolate, size_t size, bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
base::LockGuard<base::Mutex> scope_lock(&mutex_);
return allocations_.find(buffer_start) != allocations_.end();
}
void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
size_t* allocation_length) {
if (empty_backing_store_.allocation_base == nullptr) {
constexpr size_t buffer_length = 0;
const bool require_guard_regions = trap_handler::IsTrapHandlerEnabled();
void* local_allocation_base;
size_t local_allocation_length;
void* buffer_start = TryAllocateBackingStore(
this, buffer_length, require_guard_regions, &local_allocation_base,
&local_allocation_length);
empty_backing_store_ =
AllocationData(local_allocation_base, local_allocation_length,
buffer_start, buffer_length);
}
*allocation_base = empty_backing_store_.allocation_base;
*allocation_length = empty_backing_store_.allocation_length;
return empty_backing_store_.buffer_start;
}
bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
return buffer_start == empty_backing_store_.buffer_start;
}
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
if (IsEmptyBackingStore(buffer_start)) {
// We don't need to do anything for the empty backing store, because this
// will be freed when WasmMemoryTracker shuts down. Return true so callers
// will not try to free the buffer on their own.
return true;
}
if (IsWasmMemory(buffer_start)) {
const AllocationData allocation = ReleaseAllocation(buffer_start);
CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
return true;
}
return false;
}
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, size_t size,
bool require_guard_regions, bool require_guard_regions,
void** allocation_base, void** allocation_base,
size_t* allocation_length) { size_t* allocation_length) {
...@@ -108,16 +166,15 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size, ...@@ -108,16 +166,15 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
#endif #endif
// We always allocate the largest possible offset into the heap, so the // We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible. // addressable memory after the guard page can be made inaccessible.
*allocation_length = require_guard_regions *allocation_length =
require_guard_regions
? RoundUp(kWasmMaxHeapOffset, CommitPageSize()) ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
: base::bits::RoundUpToPowerOfTwo32(RoundUp( : RoundUp(
static_cast<uint32_t>(size), kWasmPageSize)); base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
kWasmPageSize);
DCHECK_GE(*allocation_length, size); DCHECK_GE(*allocation_length, size);
DCHECK_GE(*allocation_length, kWasmPageSize); DCHECK_GE(*allocation_length, kWasmPageSize);
WasmMemoryTracker* const memory_tracker =
isolate->wasm_engine()->memory_tracker();
// Let the WasmMemoryTracker know we are going to reserve a bunch of // Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space. // address space.
if (!memory_tracker->ReserveAddressSpace(*allocation_length)) { if (!memory_tracker->ReserveAddressSpace(*allocation_length)) {
...@@ -135,11 +192,10 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size, ...@@ -135,11 +192,10 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
void* memory = *allocation_base; void* memory = *allocation_base;
// Make the part we care about accessible. // Make the part we care about accessible.
if (size > 0) {
CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize), CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
PageAllocator::kReadWrite)); PageAllocator::kReadWrite));
}
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
memory_tracker->RegisterAllocation(*allocation_base, *allocation_length, memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
memory, size); memory, size);
...@@ -172,12 +228,16 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size, ...@@ -172,12 +228,16 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
return Handle<JSArrayBuffer>::null(); return Handle<JSArrayBuffer>::null();
} }
void* allocation_base = nullptr; // Set by TryAllocateBackingStore WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
size_t allocation_length = 0; // Set by TryAllocateBackingStore
// Do not reserve memory till non zero memory is encountered. // Set by TryAllocateBackingStore or GetEmptyBackingStore
void* memory = (size == 0) ? nullptr void* allocation_base = nullptr;
size_t allocation_length = 0;
void* memory = (size == 0) ? memory_tracker->GetEmptyBackingStore(
&allocation_base, &allocation_length)
: TryAllocateBackingStore( : TryAllocateBackingStore(
isolate, size, require_guard_regions, memory_tracker, size, require_guard_regions,
&allocation_base, &allocation_length); &allocation_base, &allocation_length);
if (size > 0 && memory == nullptr) { if (size > 0 && memory == nullptr) {
...@@ -192,6 +252,9 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size, ...@@ -192,6 +252,9 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
} }
#endif #endif
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
constexpr bool is_external = false; constexpr bool is_external = false;
return SetupArrayBuffer(isolate, memory, size, is_external, shared); return SetupArrayBuffer(isolate, memory, size, is_external, shared);
} }
......
...@@ -31,12 +31,13 @@ class WasmMemoryTracker { ...@@ -31,12 +31,13 @@ class WasmMemoryTracker {
void* buffer_start, size_t buffer_length); void* buffer_start, size_t buffer_length);
struct AllocationData { struct AllocationData {
void* const allocation_base = nullptr; void* allocation_base = nullptr;
size_t const allocation_length = 0; size_t allocation_length = 0;
void* const buffer_start = nullptr; void* buffer_start = nullptr;
size_t const buffer_length = 0; size_t buffer_length = 0;
private: private:
AllocationData() = default;
AllocationData(void* allocation_base, size_t allocation_length, AllocationData(void* allocation_base, size_t allocation_length,
void* buffer_start, size_t buffer_length) void* buffer_start, size_t buffer_length)
: allocation_base(allocation_base), : allocation_base(allocation_base),
...@@ -68,7 +69,21 @@ class WasmMemoryTracker { ...@@ -68,7 +69,21 @@ class WasmMemoryTracker {
// buffer is not tracked. // buffer is not tracked.
const AllocationData* FindAllocationData(const void* buffer_start); const AllocationData* FindAllocationData(const void* buffer_start);
// Empty WebAssembly memories are all backed by a shared inaccessible
// reservation. This method creates this store or returns the existing one if
// already created.
void* GetEmptyBackingStore(void** allocation_base, size_t* allocation_length);
bool IsEmptyBackingStore(const void* buffer_start) const;
// Checks if a buffer points to a Wasm memory and if so does any necessary
// work to reclaim the buffer. If this function returns false, the caller must
// free the buffer manually.
bool FreeMemoryIfIsWasmMemory(const void* buffer_start);
private: private:
AllocationData InternalReleaseAllocation(const void* buffer_start);
// Clients use a two-part process. First they "reserve" the address space, // Clients use a two-part process. First they "reserve" the address space,
// which signifies an intent to actually allocate it. This determines whether // which signifies an intent to actually allocate it. This determines whether
// doing the allocation would put us over our limit. Once there is a // doing the allocation would put us over our limit. Once there is a
...@@ -89,6 +104,11 @@ class WasmMemoryTracker { ...@@ -89,6 +104,11 @@ class WasmMemoryTracker {
// buffer, rather than by the start of the allocation. // buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_; std::unordered_map<const void*, AllocationData> allocations_;
// Empty backing stores still need to be backed by mapped pages when using
// trap handlers. Because this could eat up address space quickly, we keep a
// shared backing store here.
AllocationData empty_backing_store_;
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker); DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
}; };
......
...@@ -371,6 +371,7 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, ...@@ -371,6 +371,7 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
new_size > kMaxInt) { new_size > kMaxInt) {
return Handle<JSArrayBuffer>::null(); return Handle<JSArrayBuffer>::null();
} }
// Try to adjust the permissions and reuse the old backing store
if (((use_trap_handler && !old_buffer->is_external() && if (((use_trap_handler && !old_buffer->is_external() &&
new_size < old_buffer->allocation_length()) || new_size < old_buffer->allocation_length()) ||
old_size == new_size) && old_size == new_size) &&
...@@ -379,6 +380,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, ...@@ -379,6 +380,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if (old_size != new_size) { if (old_size != new_size) {
// If adjusting permissions fails, propagate error back to return // If adjusting permissions fails, propagate error back to return
// failure to grow. // failure to grow.
DCHECK(!isolate->wasm_engine()->memory_tracker()->IsEmptyBackingStore(
old_mem_start));
if (!i::SetPermissions(old_mem_start, new_size, if (!i::SetPermissions(old_mem_start, new_size,
PageAllocator::kReadWrite)) { PageAllocator::kReadWrite)) {
return Handle<JSArrayBuffer>::null(); return Handle<JSArrayBuffer>::null();
...@@ -396,6 +399,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate, ...@@ -396,6 +399,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external); wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
return new_buffer; return new_buffer;
} else { } else {
// We couldn't reuse the old backing store, so create a new one and copy the
// old contents in.
Handle<JSArrayBuffer> new_buffer; Handle<JSArrayBuffer> new_buffer;
new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler); new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
if (new_buffer.is_null() || old_size == 0) return new_buffer; if (new_buffer.is_null() || old_size == 0) return new_buffer;
......
...@@ -1091,9 +1091,10 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) { ...@@ -1091,9 +1091,10 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
auto const contents = v8::Utils::ToLocal(buffer)->Externalize(); auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0); int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
CHECK_EQ(16, result); CHECK_EQ(16, result);
constexpr bool is_wasm_memory = true;
const JSArrayBuffer::Allocation allocation{ const JSArrayBuffer::Allocation allocation{
contents.AllocationBase(), contents.AllocationLength(), contents.Data(), contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
contents.AllocationMode()}; contents.AllocationMode(), is_wasm_memory};
JSArrayBuffer::FreeBackingStore(isolate, allocation); JSArrayBuffer::FreeBackingStore(isolate, allocation);
} }
Cleanup(); Cleanup();
...@@ -1114,9 +1115,10 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) { ...@@ -1114,9 +1115,10 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
isolate, 16 * kWasmPageSize, require_guard_regions); isolate, 16 * kWasmPageSize, require_guard_regions);
auto const contents = v8::Utils::ToLocal(buffer)->Externalize(); auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
wasm::DetachMemoryBuffer(isolate, buffer, true); wasm::DetachMemoryBuffer(isolate, buffer, true);
constexpr bool is_wasm_memory = true;
const JSArrayBuffer::Allocation allocation{ const JSArrayBuffer::Allocation allocation{
contents.AllocationBase(), contents.AllocationLength(), contents.Data(), contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
contents.AllocationMode()}; contents.AllocationMode(), is_wasm_memory};
JSArrayBuffer::FreeBackingStore(isolate, allocation); JSArrayBuffer::FreeBackingStore(isolate, allocation);
} }
Cleanup(); Cleanup();
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
// Regression test for https://crbug.com/769637: with trap handlers enabled, a
// store into a zero-sized wasm memory must still trap as out-of-bounds rather
// than touching an unmapped (or accidentally mapped) near-null address.
const moduleBuilder = new WasmModuleBuilder();
moduleBuilder
    .addMemory()
    .addFunction("main", kSig_v_v)
    .addBody([
      kExprI32Const, 4,        // address operand
      kExprI32Const, 8,        // value operand
      kExprI32StoreMem, 0, 16  // i32.store (memarg immediates: 0, 16)
    ])
    .exportAs("main");
const wasmInstance = moduleBuilder.instantiate();
assertTraps(kTrapMemOutOfBounds, wasmInstance.exports.main);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment