Commit 365b637c authored by Deepti Gandluri, committed by Commit Bot

Reland "[wasm] Lazy update instances on a shared Memory.Grow"

This is a reland of 80f06d6f

Original change's description:
> [wasm] Lazy update instances on a shared Memory.Grow
> 
>  - Introduce a GROW_SHARED_MEMORY interrupt, and handler
>  - Memory objects for isolates are updated on a stack check, add
>    tracking for isolates that hit the stack check
>  - When enough memory is not reserved ahead of time, fail to grow
>  - Add tracking for externalized buffers in the MemoryTracker so
>    that the MemoryTracker will know when backing_stores can be freed.
>  - For shared buffer, do not always allocate a new buffer when
>    growing an externalized buffer
> 
> 
> Change-Id: I9cf1be19f2f165fa6ea4096869f7d6365304c8c4
> Bug: v8:8564
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1472430
> Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
> Reviewed-by: Ben Smith <binji@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#60064}

Bug: v8:8564
Change-Id: Id0cf8e42a9d54ac702dba351e248a1b92713c98a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1506357
Reviewed-by: Bill Budge <bbudge@chromium.org>
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60071}
parent b22e6cfd
...@@ -640,6 +640,12 @@ Object StackGuard::HandleInterrupts() { ...@@ -640,6 +640,12 @@ Object StackGuard::HandleInterrupts() {
isolate_->heap()->HandleGCRequest(); isolate_->heap()->HandleGCRequest();
} }
if (CheckAndClearInterrupt(GROW_SHARED_MEMORY)) {
TRACE_INTERRUPT("GROW_SHARED_MEMORY");
isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
isolate_);
}
if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) { if (CheckAndClearInterrupt(TERMINATE_EXECUTION)) {
TRACE_INTERRUPT("TERMINATE_EXECUTION"); TRACE_INTERRUPT("TERMINATE_EXECUTION");
return isolate_->TerminateExecution(); return isolate_->TerminateExecution();
......
...@@ -96,7 +96,8 @@ class V8_EXPORT_PRIVATE StackGuard final { ...@@ -96,7 +96,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
V(GC_REQUEST, GC, 1) \ V(GC_REQUEST, GC, 1) \
V(INSTALL_CODE, InstallCode, 2) \ V(INSTALL_CODE, InstallCode, 2) \
V(API_INTERRUPT, ApiInterrupt, 3) \ V(API_INTERRUPT, ApiInterrupt, 3) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 5)
#define V(NAME, Name, id) \ #define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \ inline bool Check##Name() { return CheckInterrupt(NAME); } \
......
...@@ -2922,6 +2922,8 @@ void Isolate::Deinit() { ...@@ -2922,6 +2922,8 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr; optimizing_compile_dispatcher_ = nullptr;
} }
wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this);
heap_.mark_compact_collector()->EnsureSweepingCompleted(); heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted(); heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();
......
...@@ -77,11 +77,7 @@ void* JSArrayBuffer::allocation_base() const { ...@@ -77,11 +77,7 @@ void* JSArrayBuffer::allocation_base() const {
} }
bool JSArrayBuffer::is_wasm_memory() const { bool JSArrayBuffer::is_wasm_memory() const {
bool const is_wasm_memory = IsWasmMemoryBit::decode(bit_field()); return IsWasmMemoryBit::decode(bit_field());
DCHECK_EQ(is_wasm_memory,
GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
backing_store()));
return is_wasm_memory;
} }
void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) { void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
......
...@@ -69,11 +69,7 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { ...@@ -69,11 +69,7 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.is_wasm_memory) { if (allocation.is_wasm_memory) {
wasm::WasmMemoryTracker* memory_tracker = wasm::WasmMemoryTracker* memory_tracker =
isolate->wasm_engine()->memory_tracker(); isolate->wasm_engine()->memory_tracker();
if (!memory_tracker->FreeMemoryIfIsWasmMemory(isolate, memory_tracker->FreeMemoryIfIsWasmMemory(isolate, allocation.backing_store);
allocation.backing_store)) {
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.length));
}
} else { } else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base, isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length); allocation.length);
......
...@@ -922,6 +922,9 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) { ...@@ -922,6 +922,9 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
return Nothing<bool>(); return Nothing<bool>();
} }
isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
object, isolate_);
WriteTag(SerializationTag::kWasmMemoryTransfer); WriteTag(SerializationTag::kWasmMemoryTransfer);
WriteZigZag<int32_t>(object->maximum_pages()); WriteZigZag<int32_t>(object->maximum_pages());
return WriteJSReceiver(Handle<JSReceiver>(object->array_buffer(), isolate_)); return WriteJSReceiver(Handle<JSReceiver>(object->array_buffer(), isolate_));
...@@ -1866,6 +1869,9 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() { ...@@ -1866,6 +1869,9 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
Handle<WasmMemoryObject> result = Handle<WasmMemoryObject> result =
WasmMemoryObject::New(isolate_, buffer, maximum_pages); WasmMemoryObject::New(isolate_, buffer, maximum_pages);
isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
result, isolate_);
AddObjectWithID(id, result); AddObjectWithID(id, result);
return result; return result;
} }
......
...@@ -167,6 +167,7 @@ WasmMemoryTracker::~WasmMemoryTracker() { ...@@ -167,6 +167,7 @@ WasmMemoryTracker::~WasmMemoryTracker() {
// is destroyed. // is destroyed.
DCHECK_EQ(reserved_address_space_, 0u); DCHECK_EQ(reserved_address_space_, 0u);
DCHECK_EQ(allocated_address_space_, 0u); DCHECK_EQ(allocated_address_space_, 0u);
DCHECK(allocations_.empty());
} }
void* WasmMemoryTracker::TryAllocateBackingStoreForTesting( void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
...@@ -178,7 +179,8 @@ void* WasmMemoryTracker::TryAllocateBackingStoreForTesting( ...@@ -178,7 +179,8 @@ void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory, void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
void* buffer_start) { void* buffer_start) {
ReleaseAllocation(nullptr, buffer_start); base::MutexGuard scope_lock(&mutex_);
ReleaseAllocation_Locked(nullptr, buffer_start);
CHECK(FreePages(GetPlatformPageAllocator(), CHECK(FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(memory.begin()), memory.size())); reinterpret_cast<void*>(memory.begin()), memory.size()));
} }
...@@ -219,14 +221,11 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate, ...@@ -219,14 +221,11 @@ void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
buffer_start, buffer_length}); buffer_start, buffer_length});
} }
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation( WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
Isolate* isolate, const void* buffer_start) { Isolate* isolate, const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
auto find_result = allocations_.find(buffer_start); auto find_result = allocations_.find(buffer_start);
CHECK_NE(find_result, allocations_.end()); CHECK_NE(find_result, allocations_.end());
if (find_result != allocations_.end()) {
size_t num_bytes = find_result->second.allocation_length; size_t num_bytes = find_result->second.allocation_length;
DCHECK_LE(num_bytes, reserved_address_space_); DCHECK_LE(num_bytes, reserved_address_space_);
DCHECK_LE(num_bytes, allocated_address_space_); DCHECK_LE(num_bytes, allocated_address_space_);
...@@ -240,8 +239,6 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation( ...@@ -240,8 +239,6 @@ WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
AllocationData allocation_data = find_result->second; AllocationData allocation_data = find_result->second;
allocations_.erase(find_result); allocations_.erase(find_result);
return allocation_data; return allocation_data;
}
UNREACHABLE();
} }
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData( const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
...@@ -259,6 +256,13 @@ bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) { ...@@ -259,6 +256,13 @@ bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
return allocations_.find(buffer_start) != allocations_.end(); return allocations_.find(buffer_start) != allocations_.end();
} }
// Returns true iff {buffer_start} is a tracked wasm allocation that has been
// registered as shared (i.e. transferred across isolates via PostMessage).
bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  const auto it = allocations_.find(buffer_start);
  // Not a wasm allocation at all.
  if (it == allocations_.end()) return false;
  // A wasm allocation; shared only if explicitly marked as such.
  return it->second.is_shared;
}
bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) { bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_); base::MutexGuard scope_lock(&mutex_);
const auto allocation = allocations_.find(buffer_start); const auto allocation = allocations_.find(buffer_start);
...@@ -276,15 +280,291 @@ bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) { ...@@ -276,15 +280,291 @@ bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate, bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
const void* buffer_start) { const void* buffer_start) {
if (IsWasmMemory(buffer_start)) { base::MutexGuard scope_lock(&mutex_);
const AllocationData allocation = ReleaseAllocation(isolate, buffer_start); const auto& result = allocations_.find(buffer_start);
if (result == allocations_.end()) return false;
if (result->second.is_shared) {
// This is a shared WebAssembly.Memory allocation
FreeMemoryIfNotShared_Locked(isolate, buffer_start);
return true;
}
// This is a WebAssembly.Memory allocation
const AllocationData allocation =
ReleaseAllocation_Locked(isolate, buffer_start);
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base, CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.allocation_length)); allocation.allocation_length));
return true; return true;
}
// Called when a WebAssembly.Memory object is transferred via PostMessage.
// Marks its allocation as shared and records {isolate} as a sharer of the
// backing store, so it can be notified (via interrupt) on later grows.
// NOTE(review): IsWasmMemory() takes {mutex_} internally and the lock is
// released before it is re-acquired below — presumably benign since
// allocations are not removed concurrently here; confirm.
void WasmMemoryTracker::RegisterWasmMemoryAsShared(
    Handle<WasmMemoryObject> object, Isolate* isolate) {
  const void* backing_store = object->array_buffer()->backing_store();
  // TODO(V8:8810): This should be a DCHECK, currently some tests do not
  // use a full WebAssembly.Memory, and fail on registering so return early.
  if (!IsWasmMemory(backing_store)) return;
  {
    base::MutexGuard scope_lock(&mutex_);
    // Register as shared allocation when it is post messaged. This happens only
    // the first time a buffer is shared over Postmessage, and track all the
    // memory objects that are associated with this backing store.
    RegisterSharedWasmMemory_Locked(object, isolate);
    // Add isolate to backing store mapping.
    isolates_per_buffer_[backing_store].emplace(isolate);
  }
}
// Called right after a shared buffer's pages have been grown (permissions
// adjusted) but before the memory objects in other isolates know about it.
// Records the pending new size and interrupts all sharing isolates so they
// pick up the new size at their next stack check.
void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
                                               size_t new_size) {
  base::MutexGuard scope_lock(&mutex_);
  // Keep track of the new size of the buffer associated with each backing
  // store.
  AddBufferToGrowMap_Locked(old_buffer, new_size);
  // Request interrupt to GROW_SHARED_MEMORY to other isolates
  TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
}
// Interrupt handler for GROW_SHARED_MEMORY, invoked from the stack check.
// Brings this isolate's shared memory objects up to date with every pending
// grow, and retires grow entries once all sharing isolates have caught up.
void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
  base::MutexGuard scope_lock(&mutex_);
  // For every buffer in the grow_update_map_, update the size for all the
  // memory objects associated with this isolate.
  for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
    UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
    // If all the isolates that share this buffer have hit a stack check, their
    // memory objects are updated, and this grow entry can be erased.
    // erase() returns the next valid iterator, so iteration stays safe.
    if (AreAllIsolatesUpdated_Locked(it->first)) {
      it = grow_update_map_.erase(it);
    } else {
      it++;
    }
  }
}
// Marks the allocation backing {object} as shared and appends a persistent
// (global) handle for the memory object, keyed under its backing store, so
// the object can be refreshed from any thread servicing a grow interrupt.
// Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
    Handle<WasmMemoryObject> object, Isolate* isolate) {
  DCHECK(object->array_buffer()->is_shared());
  void* backing_store = object->array_buffer()->backing_store();
  // The allocation of a WasmMemoryObject should always be registered with the
  // WasmMemoryTracker.
  const auto& result = allocations_.find(backing_store);
  if (result == allocations_.end()) return;
  // Register the allocation as shared, if not already marked as shared.
  if (!result->second.is_shared) result->second.is_shared = true;
  // Create persistent global handles for the memory objects that are shared,
  // so they outlive the current HandleScope.
  GlobalHandles* global_handles = isolate->global_handles();
  object = global_handles->Create(*object);
  // Add to memory_object_vector to track memory objects, instance objects
  // that will need to be updated on a Grow call.
  result->second.memory_object_vector.push_back(
      SharedMemoryObjectState(object, isolate));
}
// Records {new_size} as the pending size for {old_buffer}'s backing store.
// On repeated grows of the same buffer before all isolates have updated,
// the entry is overwritten with the larger size and the "already updated"
// isolate set is reset. Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::AddBufferToGrowMap_Locked(
    Handle<JSArrayBuffer> old_buffer, size_t new_size) {
  void* backing_store = old_buffer->backing_store();
  auto entry = grow_update_map_.find(old_buffer->backing_store());
  if (entry == grow_update_map_.end()) {
    // No pending grow for this backing store, add to map.
    grow_update_map_.emplace(backing_store, new_size);
    return;
  }
  // If grow on the same buffer is requested before the update is complete,
  // the new_size should always be greater or equal to the old_size. Equal
  // in the case that grow(0) is called, but new buffer handles are mandated
  // by the Spec.
  CHECK_LE(entry->second, new_size);
  entry->second = new_size;
  // Flush the set of already-updated isolates every time a new grow size is
  // recorded, so all instances get re-updated with the most recent size.
  ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
}
// Requests a GROW_SHARED_MEMORY interrupt on every isolate that shares
// {old_buffer}'s backing store, so each one updates its memory objects at
// its next stack check. Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
    Handle<JSArrayBuffer> old_buffer) {
  // Request a GrowShareMemory interrupt on all the isolates that share
  // the backing store.
  const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
  // Guard against a buffer that has no registered sharers: dereferencing an
  // end() iterator is undefined behavior.
  if (isolates == isolates_per_buffer_.end()) return;
  for (const auto& isolate : isolates->second) {
    isolate->stack_guard()->RequestGrowSharedMemory();
  }
}
// For one (backing_store, new_size) pending-grow entry: refresh {isolate}'s
// memory objects if it shares the buffer, and mark it as updated so we can
// tell when every sharer has serviced the interrupt. Requires {mutex_}.
void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
    Isolate* isolate, void* backing_store, size_t new_size) {
  // Update objects only if there are memory objects that share this backing
  // store, and this isolate is marked as one of the isolates that shares this
  // buffer.
  if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
    UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
    // As the memory objects are updated, add this isolate to a set of isolates
    // that are updated on grow. This state is maintained to track if all the
    // isolates that share the backing store have hit a StackCheck.
    isolates_updated_on_grow_[backing_store].emplace(isolate);
  }
}
// Returns true when every isolate sharing {backing_store} has serviced the
// grow interrupt (or when nothing shares the buffer at all). Once all
// sharers are caught up, the tracking entry is dropped as a side effect.
// Requires {mutex_} to be held by the caller.
bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
    const void* backing_store) {
  const auto sharers_it = isolates_per_buffer_.find(backing_store);
  // Nobody shares this buffer, so there is nothing left to update.
  if (sharers_it == isolates_per_buffer_.end()) return true;
  const auto updated_it = isolates_updated_on_grow_.find(backing_store);
  // The buffer is shared, but no isolate has hit a stack check yet.
  if (updated_it == isolates_updated_on_grow_.end()) return false;
  // Some, but not all, sharing isolates have been updated so far.
  if (sharers_it->second != updated_it->second) return false;
  // Every sharer has updated its memory objects: retire the tracking entry.
  isolates_updated_on_grow_.erase(backing_store);
  return true;
}
// Resets the "already updated" isolate set for {backing_store}. A second
// grow on the same buffer invalidates previous updates: every sharing
// isolate must be refreshed again with the newest size. Requires {mutex_}.
void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
    const void* backing_store) {
  auto it = isolates_updated_on_grow_.find(backing_store);
  if (it == isolates_updated_on_grow_.end()) return;
  it->second.clear();
}
// Rebuilds the JSArrayBuffer (same backing store, new {new_size}) for every
// memory object registered for this {backing_store} that belongs to
// {isolate}, and propagates the new buffer to the object's instances.
// Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
    Isolate* isolate, void* backing_store, size_t new_size) {
  const auto& result = allocations_.find(backing_store);
  if (result == allocations_.end() || !result->second.is_shared) return;
  for (const auto& memory_obj_state : result->second.memory_object_vector) {
    DCHECK_NE(memory_obj_state.isolate, nullptr);
    // Only touch memory objects owned by the isolate servicing the interrupt.
    if (isolate == memory_obj_state.isolate) {
      HandleScope scope(isolate);
      Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
      DCHECK(memory_object->IsWasmMemoryObject());
      DCHECK(memory_object->array_buffer()->is_shared());
      // Permissions adjusted, but create a new buffer with new size
      // and old attributes. Buffer has already been allocated,
      // just create a new buffer with same backing store.
      bool is_external = memory_object->array_buffer()->is_external();
      Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
          isolate, backing_store, new_size, is_external, SharedFlag::kShared);
      memory_obj_state.memory_object->update_instances(isolate, new_buffer);
    }
  }
}
// Returns true if {isolate} shares {backing_store} and there are registered
// memory objects that would need refreshing after a grow.
// Requires {mutex_} to be held by the caller.
bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
    Isolate* isolate, const void* backing_store) {
  const auto alloc_it = allocations_.find(backing_store);
  // Only shared wasm allocations can have cross-isolate memory objects.
  if (alloc_it == allocations_.end()) return false;
  if (!alloc_it->second.is_shared) return false;
  // Nothing registered means nothing to refresh.
  if (alloc_it->second.memory_object_vector.empty()) return false;
  // Finally, {isolate} must actually be one of the sharers of this buffer.
  const auto sharers_it = isolates_per_buffer_.find(backing_store);
  if (sharers_it == isolates_per_buffer_.end()) return false;
  return sharers_it->second.count(isolate) != 0;
}
// Tears down {isolate}'s shared-buffer bookkeeping for {backing_store} and
// frees the pages only once no isolate references the buffer anymore.
// Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
    Isolate* isolate, const void* backing_store) {
  RemoveSharedBufferState_Locked(isolate, backing_store);
  if (CanFreeSharedMemory_Locked(backing_store)) {
    const AllocationData allocation =
        ReleaseAllocation_Locked(isolate, backing_store);
    CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
                    allocation.allocation_length));
  }
}
bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
const auto& value = isolates_per_buffer_.find(backing_store);
// If no isolates share this buffer, backing store can be freed.
// Erase the buffer entry.
if (value == isolates_per_buffer_.end()) return true;
if (value->second.empty()) {
// If no isolates share this buffer, the global handles to memory objects
// associated with this buffer should have been destroyed.
// DCHECK(shared_memory_map_.find(backing_store) ==
// shared_memory_map_.end());
return true;
} }
return false; return false;
} }
// Removes shared-buffer state for {backing_store}: per-isolate state when
// {isolate} is given, or all state across isolates when it is null (the
// externalized-contents cleanup path). Requires {mutex_} to be held.
void WasmMemoryTracker::RemoveSharedBufferState_Locked(
    Isolate* isolate, const void* backing_store) {
  if (isolate != nullptr) {
    DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
    RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
  } else {
    // This happens for externalized contents: clean up shared memory state
    // associated with this buffer across all isolates.
    DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
  }
}
// Cross-isolate variant: destroys the persistent handles of ALL memory
// objects registered for {backing_store} and drops the buffer from the
// isolates-per-buffer map entirely. Requires {mutex_} to be held.
void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
    const void* backing_store) {
  const auto& result = allocations_.find(backing_store);
  // Caller guarantees this is a tracked, shared allocation.
  CHECK(result != allocations_.end() && result->second.is_shared);
  auto& object_vector = result->second.memory_object_vector;
  if (object_vector.empty()) return;
  for (const auto& mem_obj_state : object_vector) {
    GlobalHandles::Destroy(mem_obj_state.memory_object.location());
  }
  object_vector.clear();
  // Remove isolate from backing store map.
  isolates_per_buffer_.erase(backing_store);
}
// Per-isolate variant: destroys only the persistent handles of memory
// objects belonging to {isolate} for this {backing_store}, leaving other
// isolates' entries intact. Requires {mutex_} to be held.
void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
    Isolate* isolate, const void* backing_store) {
  // This gets called when an internal handle to the ArrayBuffer should be
  // freed, on heap tear down for that isolate, remove the memory objects
  // that are associated with this buffer and isolate.
  const auto& result = allocations_.find(backing_store);
  CHECK(result != allocations_.end() && result->second.is_shared);
  auto& object_vector = result->second.memory_object_vector;
  if (object_vector.empty()) return;
  // erase() returns the next valid iterator, keeping the loop safe while
  // removing entries mid-iteration.
  for (auto it = object_vector.begin(); it != object_vector.end();) {
    if (isolate == it->isolate) {
      GlobalHandles::Destroy(it->memory_object.location());
      it = object_vector.erase(it);
    } else {
      ++it;
    }
  }
}
// Drops {isolate} from the sharer set of {backing_store}, if present.
// Requires {mutex_} to be held by the caller.
void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
    Isolate* isolate, const void* backing_store) {
  auto it = isolates_per_buffer_.find(backing_store);
  // Nothing to do if the buffer is unknown or has no sharers recorded.
  if (it == isolates_per_buffer_.end()) return;
  if (it->second.empty()) return;
  it->second.erase(isolate);
}
// Called on isolate teardown (from Isolate::Deinit). Destroys any global
// handles this isolate still holds for shared wasm memories and removes the
// isolate from all per-buffer bookkeeping, so handles cannot leak when
// buffer GC and isolate destruction race.
void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
  base::MutexGuard scope_lock(&mutex_);
  // This is possible for buffers that are externalized, and their handles have
  // been freed, the backing store wasn't released because externalized contents
  // were using it.
  if (isolates_per_buffer_.empty()) return;
  for (auto& entry : isolates_per_buffer_) {
    if (entry.second.find(isolate) == entry.second.end()) continue;
    const void* backing_store = entry.first;
    entry.second.erase(isolate);
    DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
  }
  // Also forget the isolate in the pending-grow update tracking.
  for (auto& buffer_isolates : isolates_updated_on_grow_) {
    auto& isolates = buffer_isolates.second;
    isolates.erase(isolate);
  }
}
void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) { void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
// Report address space usage in MiB so the full range fits in an int on all // Report address space usage in MiB so the full range fits in an int on all
// platforms. // platforms.
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <atomic> #include <atomic>
#include <unordered_map> #include <unordered_map>
#include <unordered_set>
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/flags.h" #include "src/flags.h"
...@@ -39,11 +40,27 @@ class WasmMemoryTracker { ...@@ -39,11 +40,27 @@ class WasmMemoryTracker {
size_t allocation_length, void* buffer_start, size_t allocation_length, void* buffer_start,
size_t buffer_length); size_t buffer_length);
// Pairs a persistent (global) handle to a shared WasmMemoryObject with the
// isolate that owns the handle. Stored per backing store so memory objects
// can be updated from whichever thread services a grow interrupt.
struct SharedMemoryObjectState {
  // Global handle; must be destroyed via GlobalHandles::Destroy.
  Handle<WasmMemoryObject> memory_object;
  // Owning isolate of {memory_object}'s handle.
  Isolate* isolate;

  SharedMemoryObjectState() = default;
  SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
                          Isolate* isolate)
      : memory_object(memory_object), isolate(isolate) {}
};
struct AllocationData { struct AllocationData {
void* allocation_base = nullptr; void* allocation_base = nullptr;
size_t allocation_length = 0; size_t allocation_length = 0;
void* buffer_start = nullptr; void* buffer_start = nullptr;
size_t buffer_length = 0; size_t buffer_length = 0;
bool is_shared = false;
// Track Wasm Memory instances across isolates, this is populated on
// PostMessage using persistent handles for memory objects.
std::vector<WasmMemoryTracker::SharedMemoryObjectState>
memory_object_vector;
private: private:
AllocationData() = default; AllocationData() = default;
...@@ -81,11 +98,11 @@ class WasmMemoryTracker { ...@@ -81,11 +98,11 @@ class WasmMemoryTracker {
// Decreases the amount of reserved address space. // Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes); void ReleaseReservation(size_t num_bytes);
// Removes an allocation from the tracker.
AllocationData ReleaseAllocation(Isolate* isolate, const void* buffer_start);
bool IsWasmMemory(const void* buffer_start); bool IsWasmMemory(const void* buffer_start);
bool IsWasmSharedMemory(const void* buffer_start);
// Returns whether the given buffer is a Wasm memory with guard regions large // Returns whether the given buffer is a Wasm memory with guard regions large
// enough to safely use trap handlers. // enough to safely use trap handlers.
bool HasFullGuardRegions(const void* buffer_start); bool HasFullGuardRegions(const void* buffer_start);
...@@ -99,6 +116,26 @@ class WasmMemoryTracker { ...@@ -99,6 +116,26 @@ class WasmMemoryTracker {
// free the buffer manually. // free the buffer manually.
bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start); bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start);
// When WebAssembly.Memory is transferred over PostMessage, register the
// allocation as shared and track the memory objects that will need
// updating if memory is resized.
void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
Isolate* isolate);
// This method is called when the underlying backing store is grown, but
// instances that share the backing_store have not yet been updated.
void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
size_t new_size);
// Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects
// and instances that share the memory objects after a Grow call.
void UpdateSharedMemoryInstances(Isolate* isolate);
// Due to timing of when buffers are garbage collected, vs. when isolate
// object handles are destroyed, it is possible to leak global handles. To
// avoid this, cleanup any global handles on isolate destruction if any exist.
void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
// Allocation results are reported to UMA // Allocation results are reported to UMA
// //
// See wasm_memory_allocation_result in counters.h // See wasm_memory_allocation_result in counters.h
...@@ -114,8 +151,70 @@ class WasmMemoryTracker { ...@@ -114,8 +151,70 @@ class WasmMemoryTracker {
}; };
private: private:
void AddAddressSpaceSample(Isolate* isolate); // Helper methods to free memory only if not shared by other isolates, memory
// objects.
void FreeMemoryIfNotShared_Locked(Isolate* isolate,
const void* backing_store);
bool CanFreeSharedMemory_Locked(const void* backing_store);
void RemoveSharedBufferState_Locked(Isolate* isolate,
const void* backing_store);
// Registers the allocation as shared, and tracks all the memory objects
// associates with this allocation across isolates.
void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
Isolate* isolate);
// Map the new size after grow to the buffer backing store, so that instances
// and memory objects that share the WebAssembly.Memory across isolates can
// be updated.
void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
size_t new_size);
// Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
// objects that share this buffer.
void TriggerSharedGrowInterruptOnAllIsolates_Locked(
Handle<JSArrayBuffer> old_buffer);
// When isolates hit a stack check, update the memory objects associated with
// that isolate.
void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
void* backing_store,
size_t new_size);
// Check if all the isolates that share a backing_store have hit a stack
// check. If a stack check is hit, and the backing store is pending grow,
// this isolate will have updated memory objects.
bool AreAllIsolatesUpdated_Locked(const void* backing_store);
// If a grow call is made to a buffer with a pending grow, and all the
// isolates that share this buffer have not hit a StackCheck, clear the set of
// already updated instances so they can be updated with the new size on the
// most recent grow call.
void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
// Helper functions to update memory objects on grow, and maintain state for
// which isolates hit a stack check.
void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
void* backing_store,
size_t new_size);
bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
const void* backing_store);
// Destroy global handles to memory objects, and remove backing store from
// isolates_per_buffer on Free.
void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
Isolate* isolate, const void* backing_store);
void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
const void* backing_store);
void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
const void* backing_store);
// Removes an allocation from the tracker.
AllocationData ReleaseAllocation_Locked(Isolate* isolate,
const void* buffer_start);
void AddAddressSpaceSample(Isolate* isolate);
// Clients use a two-part process. First they "reserve" the address space, // Clients use a two-part process. First they "reserve" the address space,
// which signifies an intent to actually allocate it. This determines whether // which signifies an intent to actually allocate it. This determines whether
// doing the allocation would put us over our limit. Once there is a // doing the allocation would put us over our limit. Once there is a
...@@ -132,10 +231,36 @@ class WasmMemoryTracker { ...@@ -132,10 +231,36 @@ class WasmMemoryTracker {
size_t allocated_address_space_ = 0; size_t allocated_address_space_ = 0;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
// Track Wasm memory allocation information. This is keyed by the start of the // Track Wasm memory allocation information. This is keyed by the start of the
// buffer, rather than by the start of the allocation. // buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_; std::unordered_map<const void*, AllocationData> allocations_;
// Maps each buffer to the isolates that share the backing store.
std::unordered_map<const void*, std::unordered_set<Isolate*>>
isolates_per_buffer_;
// Maps which isolates have had a grow interrupt handled on the buffer. This
// is maintained to ensure that the instances are updated with the right size
// on Grow.
std::unordered_map<const void*, std::unordered_set<Isolate*>>
isolates_updated_on_grow_;
// Maps backing stores(void*) to the size of the underlying memory in
// (size_t). An entry to this map is made on a grow call to the corresponding
// backing store. On consecutive grow calls to the same backing store,
// the size entry is updated. This entry is made right after the mprotect
// call to change the protections on a backing_store, so the memory objects
// have not been updated yet. The backing store entry in this map is erased
// when all the memory objects, or instances that share this backing store
// have their bounds updated.
std::unordered_map<void*, size_t> grow_update_map_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker); DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
}; };
......
...@@ -910,12 +910,30 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate, ...@@ -910,12 +910,30 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
} }
namespace { namespace {
// Widens the accessible region of {old_buffer}'s backing store to {new_size}
// bytes in place. Returns false if the up-front reservation is too small or
// if changing the page permissions fails; trivially succeeds when the size
// is unchanged. On success, reports the delta as external memory pressure.
bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
                             size_t new_size) {
  // Cannot grow beyond what was reserved ahead of time.
  if (new_size > old_buffer->allocation_length()) return false;
  size_t const current_size = old_buffer->byte_length();
  if (current_size == new_size) return true;  // Nothing to adjust.
  void* mem_start = old_buffer->backing_store();
  DCHECK_NOT_NULL(mem_start);
  DCHECK_GE(new_size, current_size);
  // If adjusting permissions fails, propagate the error back as a failure
  // to grow.
  if (!i::SetPermissions(GetPlatformPageAllocator(), mem_start, new_size,
                         PageAllocator::kReadWrite)) {
    return false;
  }
  reinterpret_cast<v8::Isolate*>(isolate)
      ->AdjustAmountOfExternalAllocatedMemory(new_size - current_size);
  return true;
}
MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate, MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer, Handle<JSArrayBuffer> old_buffer,
size_t new_size) { size_t new_size) {
CHECK_EQ(0, new_size % wasm::kWasmPageSize); CHECK_EQ(0, new_size % wasm::kWasmPageSize);
size_t old_size = old_buffer->byte_length();
void* old_mem_start = old_buffer->backing_store();
// Reusing the backing store from externalized buffers causes problems with // Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can // Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and // lead to Blink not knowing about the other reference to the buffer and
...@@ -932,10 +950,12 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate, ...@@ -932,10 +950,12 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
// If the old buffer had full guard regions, we can only safely use the new // If the old buffer had full guard regions, we can only safely use the new
// buffer if it also has full guard regions. Otherwise, we'd have to // buffer if it also has full guard regions. Otherwise, we'd have to
// recompile all the instances using this memory to insert bounds checks. // recompile all the instances using this memory to insert bounds checks.
void* old_mem_start = old_buffer->backing_store();
if (memory_tracker->HasFullGuardRegions(old_mem_start) && if (memory_tracker->HasFullGuardRegions(old_mem_start) &&
!memory_tracker->HasFullGuardRegions(new_buffer->backing_store())) { !memory_tracker->HasFullGuardRegions(new_buffer->backing_store())) {
return {}; return {};
} }
size_t old_size = old_buffer->byte_length();
if (old_size == 0) return new_buffer; if (old_size == 0) return new_buffer;
memcpy(new_buffer->backing_store(), old_mem_start, old_size); memcpy(new_buffer->backing_store(), old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared()); DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
...@@ -943,18 +963,7 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate, ...@@ -943,18 +963,7 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory); i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer; return new_buffer;
} else { } else {
if (old_size != new_size) { if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {};
DCHECK_NOT_NULL(old_buffer->backing_store());
// If adjusting permissions fails, propagate error back to return
// failure to grow.
if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start,
new_size, PageAllocator::kReadWrite)) {
return {};
}
DCHECK_GE(new_size, old_size);
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
}
// NOTE: We must allocate a new array buffer here because the spec // NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size. // assumes that ArrayBuffers do not change size.
void* backing_store = old_buffer->backing_store(); void* backing_store = old_buffer->backing_store();
...@@ -1074,15 +1083,32 @@ void WasmMemoryObject::AddInstance(Isolate* isolate, ...@@ -1074,15 +1083,32 @@ void WasmMemoryObject::AddInstance(Isolate* isolate,
SetInstanceMemory(instance, buffer); SetInstanceMemory(instance, buffer);
} }
// Re-points every live instance that uses this memory at {buffer}, then
// installs {buffer} as this memory object's current array buffer. Weak slots
// in the instances list that have been cleared by GC are skipped.
void WasmMemoryObject::update_instances(Isolate* isolate,
                                        Handle<JSArrayBuffer> buffer) {
  if (has_instances()) {
    Handle<WeakArrayList> instances(this->instances(), isolate);
    for (int i = 0; i < instances->length(); i++) {
      MaybeObject elem = instances->Get(i);
      HeapObject heap_object;
      if (elem->GetHeapObjectIfWeak(&heap_object)) {
        // Live instance: update its cached memory start/size to {buffer}.
        Handle<WasmInstanceObject> instance(
            WasmInstanceObject::cast(heap_object), isolate);
        SetInstanceMemory(instance, buffer);
      } else {
        // The instance has been collected; only a cleared weak ref remains.
        DCHECK(elem->IsCleared());
      }
    }
  }
  set_array_buffer(*buffer);
}
// static // static
int32_t WasmMemoryObject::Grow(Isolate* isolate, int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object, Handle<WasmMemoryObject> memory_object,
uint32_t pages) { uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory"); TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
// TODO(gdeepti): Remove check for is_shared when Growing Shared memory if (!old_buffer->is_growable()) return -1;
// is supported.
if (!old_buffer->is_growable() || old_buffer->is_shared()) return -1;
// Checks for maximum memory size, compute new size. // Checks for maximum memory size, compute new size.
uint32_t maximum_pages = wasm::max_mem_pages(); uint32_t maximum_pages = wasm::max_mem_pages();
...@@ -1102,28 +1128,45 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, ...@@ -1102,28 +1128,45 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
size_t new_size = size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize; static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
// Grow the buffer. // Memory is grown, but the memory objects and instances are not yet updated.
// Handle this in the interrupt handler so that it's safe for all the isolates
// that share this buffer to be updated safely.
Handle<JSArrayBuffer> new_buffer; Handle<JSArrayBuffer> new_buffer;
if (!MemoryGrowBuffer(isolate, old_buffer, new_size).ToHandle(&new_buffer)) { if (old_buffer->is_shared()) {
// Adjust protections for the buffer.
if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) {
return -1; return -1;
} }
wasm::WasmMemoryTracker* const memory_tracker =
// Update instances if any. isolate->wasm_engine()->memory_tracker();
if (memory_object->has_instances()) { void* backing_store = old_buffer->backing_store();
Handle<WeakArrayList> instances(memory_object->instances(), isolate); if (memory_tracker->IsWasmSharedMemory(backing_store)) {
for (int i = 0; i < instances->length(); i++) { // This memory is shared between different isolates.
MaybeObject elem = instances->Get(i); DCHECK(old_buffer->is_shared());
HeapObject heap_object; // Update pending grow state, and trigger a grow interrupt on all the
if (elem->GetHeapObjectIfWeak(&heap_object)) { // isolates that share this buffer.
Handle<WasmInstanceObject> instance( isolate->wasm_engine()->memory_tracker()->SetPendingUpdateOnGrow(
WasmInstanceObject::cast(heap_object), isolate); old_buffer, new_size);
SetInstanceMemory(instance, new_buffer); // Handle interrupts for this isolate so that the instances with this
} else { // isolate are updated.
DCHECK(elem->IsCleared()); isolate->stack_guard()->HandleInterrupts();
// Failure to allocate, or adjust permissions already handled here, and
// updates to instances handled in the interrupt handler safe to return.
return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
} }
// SharedArrayBuffer, but not shared across isolates. Setup a new buffer
// with updated permissions and update the instances.
new_buffer = wasm::SetupArrayBuffer(isolate, backing_store, new_size,
old_buffer->is_external());
memory_object->update_instances(isolate, new_buffer);
} else {
if (!MemoryGrowBuffer(isolate, old_buffer, new_size)
.ToHandle(&new_buffer)) {
return -1;
} }
} }
memory_object->set_array_buffer(*new_buffer); // Update instances if any.
memory_object->update_instances(isolate, new_buffer);
return static_cast<uint32_t>(old_size / wasm::kWasmPageSize); return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
} }
......
...@@ -330,6 +330,8 @@ class WasmMemoryObject : public JSObject { ...@@ -330,6 +330,8 @@ class WasmMemoryObject : public JSObject {
Isolate* isolate, uint32_t initial, uint32_t maximum, Isolate* isolate, uint32_t initial, uint32_t maximum,
bool is_shared_memory); bool is_shared_memory);
void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages); static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject); OBJECT_CONSTRUCTORS(WasmMemoryObject, JSObject);
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// TODO(v8:8832): Enable --stress-opt on these tests
// Flags: --experimental-wasm-threads --no-stress-opt
load("test/mjsunit/wasm/wasm-module-builder.js");
// Growing a shared memory must work even when it was never postMessage'd to
// a worker (single-isolate path).
(function TestGrowSharedMemoryWithoutPostMessage() {
  print(arguments.callee.name);
  let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
  assertEquals(memory.buffer.byteLength, kPageSize);
  // grow() returns the previous size in pages.
  assertEquals(1, memory.grow(1));
  assertEquals(memory.buffer.byteLength, 2 * kPageSize);
})();
// Checks that {memory} (typically received via postMessage in a worker) is a
// WebAssembly.Memory backed by a frozen SharedArrayBuffer. Each check uses
// assertTrue, which reports the message and aborts on the first failure.
function assertIsWasmSharedMemory(memory) {
  assertTrue(memory instanceof Object,
      "Memory is not an object");
  assertTrue(memory instanceof WebAssembly.Memory,
      "Object is not WebAssembly.Memory" );
  assertTrue(memory.buffer instanceof SharedArrayBuffer,
      "Memory.buffer is not a SharedArrayBuffer");
  assertTrue(Object.isFrozen(memory.buffer),
      "Memory.buffer not frozen");
}
// Worker-side assertion helper: on failure, reports the message to the main
// thread via postMessage and aborts the worker by throwing.
function assertTrue(value, msg) {
  if (value) return;
  postMessage("Error: " + msg);
  throw new Error("Exit"); // To stop testing.
}
// Serialized helper sources prepended to every worker script, since workers
// run in their own global scope and cannot see these functions otherwise.
let workerHelpers = assertTrue.toString() + assertIsWasmSharedMemory.toString();
// A worker that receives a shared memory via postMessage can grow it, and
// the grow is visible on the sender's side as well.
(function TestPostMessageWithGrow() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(1 === obj.memory.grow(1));
         assertTrue(obj.memory.buffer.byteLength === obj.expected_size);
         assertIsWasmSharedMemory(obj.memory);
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
  let obj = {memory: memory, expected_size: 2 * kPageSize};
  assertEquals(obj.memory.buffer.byteLength, kPageSize);
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  // The grow performed inside the worker must be reflected here.
  assertEquals(obj.memory.buffer.byteLength, 2 * kPageSize);
  worker.terminate();
})();
// PostMessage from two different workers, and assert that the grow
// operations are performed on the same memory object.
(function TestWorkersWithGrowEarlyWorkerTerminate() {
  print(arguments.callee.name);
  let workerScript = workerHelpers +
      `onmessage = function(obj) {
         assertIsWasmSharedMemory(obj.memory);
         obj.memory.grow(1);
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === obj.expected_size);
         postMessage("OK");
       };`;
  let workers = [new Worker(workerScript, {type: 'string'}),
                 new Worker(workerScript, {type: 'string'})];
  let memory = new WebAssembly.Memory({initial: 1, maximum: 5, shared: true});
  let expected_pages = 1;
  for (let worker of workers) {
    // Each worker grows the same memory by one page; the grow stays visible
    // on the main thread even after the worker is terminated.
    assertEquals(memory.buffer.byteLength, expected_pages++ * kPageSize);
    let obj = {memory: memory, expected_size: expected_pages * kPageSize};
    worker.postMessage(obj);
    assertEquals("OK", worker.getMessage());
    assertEquals(memory.buffer.byteLength, expected_pages * kPageSize);
    worker.terminate();
  }
  assertEquals(memory.buffer.byteLength, expected_pages * kPageSize);
})();
// PostMessage of Multiple memories and grow
(function TestGrowSharedWithMultipleMemories() {
  print(arguments.callee.name);
  let workerScript = workerHelpers +
      `onmessage = function(obj) {
         let expected_size = 0;
         let kPageSize = 0x10000;
         for (let memory of obj.memories) {
           assertIsWasmSharedMemory(memory);
           assertTrue(expected_size === memory.grow(2));
           expected_size+=2;
           assertIsWasmSharedMemory(memory);
           assertTrue(memory.buffer.byteLength === expected_size * kPageSize);
         }
         postMessage("OK");
       };`;
  let worker = new Worker(workerScript, {type: 'string'});
  // Initial sizes 0, 2, 4: grow(2) returns the previous page count, which by
  // construction matches the running expected_size in the worker loop.
  let memories = [new WebAssembly.Memory({initial: 0, maximum: 2, shared: true}),
                  new WebAssembly.Memory({initial: 2, maximum: 10, shared: true}),
                  new WebAssembly.Memory({initial: 4, maximum: 12, shared: true})];
  let obj = {memories: memories};
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  // Every memory grew by 2 pages inside the worker.
  assertEquals(2 * kPageSize, memories[0].buffer.byteLength);
  assertEquals(4 * kPageSize, memories[1].buffer.byteLength);
  assertEquals(6 * kPageSize, memories[2].buffer.byteLength);
  worker.terminate();
})();
// SharedMemory Object shared between different instances
(function TestPostMessageJSAndWasmInterop() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         let kPageSize = 0x10000;
         assertIsWasmSharedMemory(obj.memory);
         let instance = new WebAssembly.Instance(
             obj.module, {m: {memory: obj.memory}});
         assertTrue(5 === obj.memory.grow(10));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
         assertTrue(15 === instance.exports.grow(5));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
  // Module exporting a wasm-level memory.grow on the imported shared memory.
  var builder = new WasmModuleBuilder();
  builder.addImportedMemory("m", "memory", 5, 100, "shared");
  builder.addFunction("grow", kSig_i_i)
      .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
      .exportFunc();
  var module = new WebAssembly.Module(builder.toBuffer());
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  worker.terminate();
  // Both the JS grow and the wasm grow in the worker are visible here.
  assertEquals(obj.memory.buffer.byteLength, 20 * kPageSize);
})();
// Consecutive grows via the JS API and via wasm memory.grow on the same
// shared memory must observe each other's updated size.
(function TestConsecutiveJSAndWasmSharedGrow() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         let kPageSize = 0x10000;
         assertIsWasmSharedMemory(obj.memory);
         let instance = new WebAssembly.Instance(
             obj.module, {m: {memory: obj.memory}});
         assertTrue(5 === obj.memory.grow(10));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
         assertTrue(15 === instance.exports.grow(5));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 20 * kPageSize);
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
  var builder = new WasmModuleBuilder();
  builder.addImportedMemory("m", "memory", 5, 100, "shared");
  builder.addFunction("grow", kSig_i_i)
      .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
      .exportFunc();
  var module = new WebAssembly.Module(builder.toBuffer());
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  assertEquals(obj.memory.buffer.byteLength, 20 * kPageSize);
})();
// Two back-to-back wasm memory.grow calls in one function must each see the
// size produced by the previous grow, both in a worker and on the main thread.
(function TestConsecutiveWasmSharedGrow() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         let kPageSize = 0x10000;
         assertIsWasmSharedMemory(obj.memory);
         let instance = new WebAssembly.Instance(
             obj.module, {m: {memory: obj.memory}});
         assertTrue(5 === obj.memory.grow(10));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
         assertTrue(17 === instance.exports.grow_twice(2));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
  // grow_twice(n) grows by n pages twice and returns the second grow's
  // result, i.e. the page count after the first grow.
  var builder = new WasmModuleBuilder();
  builder.addImportedMemory("m", "memory", 5, 100, "shared");
  builder.addFunction("grow_twice", kSig_i_i)
      .addBody([kExprGetLocal, 0,
                kExprMemoryGrow, kMemoryZero,
                kExprDrop,
                kExprGetLocal, 0,
                kExprMemoryGrow, kMemoryZero])
      .exportFunc();
  var module = new WebAssembly.Module(builder.toBuffer());
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  assertEquals(obj.memory.buffer.byteLength, 19 * kPageSize);
  // A fresh main-thread instance continues growing from 19 pages.
  let instance = new WebAssembly.Instance(module, {m: {memory: memory}});
  assertEquals(21, instance.exports.grow_twice(2));
  assertEquals(obj.memory.buffer.byteLength, 23 * kPageSize);
})();
// memory.size executed right after consecutive grows must report the size
// produced by those grows, in the worker and on the main thread.
(function TestConsecutiveSharedGrowAndMemorySize() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         let kPageSize = 0x10000;
         assertIsWasmSharedMemory(obj.memory);
         let instance = new WebAssembly.Instance(
             obj.module, {m: {memory: obj.memory}});
         assertTrue(5 === obj.memory.grow(10));
         assertTrue(15 === instance.exports.memory_size());
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
         assertTrue(19 === instance.exports.grow_and_size(2));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 19 * kPageSize);
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
  // grow_and_size(n) grows by n pages twice, then returns memory.size.
  var builder = new WasmModuleBuilder();
  builder.addImportedMemory("m", "memory", 5, 100, "shared");
  builder.addFunction("grow_and_size", kSig_i_i)
      .addBody([kExprGetLocal, 0,
                kExprMemoryGrow, kMemoryZero,
                kExprDrop,
                kExprGetLocal, 0,
                kExprMemoryGrow, kMemoryZero,
                kExprDrop,
                kExprMemorySize, kMemoryZero])
      .exportFunc();
  builder.addFunction("memory_size", kSig_i_v)
      .addBody([kExprMemorySize, kMemoryZero])
      .exportFunc();
  var module = new WebAssembly.Module(builder.toBuffer());
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  assertEquals(memory.buffer.byteLength, 19 * kPageSize);
  // Main-thread instance continues from 19 pages: 19 + 2 + 2 = 23.
  let instance = new WebAssembly.Instance(module, {m: {memory: memory}});
  assertEquals(23, instance.exports.grow_and_size(2));
  assertEquals(obj.memory.buffer.byteLength, 23 * kPageSize);
  // JS-level grow returns the previous size; wasm memory.size sees the grow.
  assertEquals(23, memory.grow(2));
  assertEquals(25, instance.exports.memory_size());
})();
// Only spot checking here because currently the underlying buffer doesn't move.
// In the case that the underlying buffer does move, more comprehensive memory
// integrity checking and bounds checks testing are needed.
(function TestSpotCheckMemoryWithSharedGrow() {
  print(arguments.callee.name);
  let worker = new Worker(workerHelpers +
      `onmessage = function(obj) {
         let kPageSize = 0x10000;
         assertIsWasmSharedMemory(obj.memory);
         let instance = new WebAssembly.Instance(
             obj.module, {m: {memory: obj.memory}});
         assertTrue(5 === obj.memory.grow(10));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 15 * kPageSize);
         // Store again, and verify that the previous stores are still reflected.
         instance.exports.atomic_store(15 * kPageSize - 4, 0xACED);
         assertTrue(0xACED === instance.exports.atomic_load(0));
         assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
         assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
         assertTrue(15 === instance.exports.grow(2));
         assertIsWasmSharedMemory(obj.memory);
         assertTrue(obj.memory.buffer.byteLength === 17 * kPageSize);
         // Validate previous writes.
         instance.exports.atomic_store(17 * kPageSize - 4, 0xACED);
         assertTrue(0xACED === instance.exports.atomic_load(0));
         assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
         assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
         assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
         postMessage("OK");
       }`, {type: 'string'});
  let memory = new WebAssembly.Memory({initial: 5, maximum: 50, shared: true});
  // Module exposing grow plus 32-bit atomic load/store accessors.
  var builder = new WasmModuleBuilder();
  builder.addImportedMemory("m", "memory", 5, 100, "shared");
  builder.addFunction("grow", kSig_i_i)
      .addBody([kExprGetLocal, 0, kExprMemoryGrow, kMemoryZero])
      .exportFunc();
  builder.addFunction("atomic_load", kSig_i_i)
      .addBody([kExprGetLocal, 0, kAtomicPrefix, kExprI32AtomicLoad, 2, 0])
      .exportFunc();
  builder.addFunction("atomic_store", kSig_v_ii)
      .addBody([kExprGetLocal, 0, kExprGetLocal, 1,
                kAtomicPrefix, kExprI32AtomicStore, 2, 0])
      .exportFunc();
  var module = new WebAssembly.Module(builder.toBuffer());
  let instance = new WebAssembly.Instance(module, {m: {memory: memory}});
  // Store at first and last accessible 32 bit offset.
  instance.exports.atomic_store(0, 0xACED);
  instance.exports.atomic_store(5 * kPageSize - 4, 0xACED);
  // Verify that these were stored.
  assertEquals(0xACED, instance.exports.atomic_load(0));
  assertEquals(0xACED, instance.exports.atomic_load(5 * kPageSize - 4));
  // Verify bounds.
  assertTraps(kTrapMemOutOfBounds,
      () => instance.exports.atomic_load(5 * kPageSize - 3));
  let obj = {memory: memory, module: module};
  assertEquals(obj.memory.buffer.byteLength, 5 * kPageSize);
  // PostMessage
  worker.postMessage(obj);
  assertEquals("OK", worker.getMessage());
  assertEquals(memory.buffer.byteLength, 17 * kPageSize);
  // Main-thread grow after the worker's grows: 17 -> 19 pages.
  assertEquals(17, instance.exports.grow(2));
  assertEquals(obj.memory.buffer.byteLength, 19 * kPageSize);
  // Validate previous writes, and check bounds.
  assertTrue(0xACED === instance.exports.atomic_load(0));
  assertTrue(0xACED === instance.exports.atomic_load(5 * kPageSize - 4));
  assertTrue(0xACED === instance.exports.atomic_load(15 * kPageSize - 4));
  assertTrue(0xACED === instance.exports.atomic_load(17 * kPageSize - 4));
  assertTraps(kTrapMemOutOfBounds,
      () => instance.exports.atomic_load(19 * kPageSize - 3));
  assertEquals(19, memory.grow(6));
  assertEquals(obj.memory.buffer.byteLength, 25 * kPageSize);
  assertTraps(kTrapMemOutOfBounds,
      () => instance.exports.atomic_load(25 * kPageSize - 3));
})();
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment