Commit 401190ba authored by Andreas Haas, committed by Commit Bot

[wasm] Fix return value of concurrent memory.grow

When memory.grow was executed concurrently on multiple threads, a data
race could cause two memory.grow operations to return the same value.
With this CL the return value of memory.grow is unique, provided that
memory.grow actually grows the memory.

As a concrete example, assume a shared WebAssembly memory initially has
a size of 100. Assume two threads call memory.grow concurrently with a
parameter `10`. With the existing code, memory would still grow correctly
to a size of 120, but the data race could cause both memory.grow
operations to return 100. With the change in this CL, one memory.grow
operation would return 100 and the other would return 110.
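
The mechanism can be illustrated with a minimal standalone sketch (not the
V8 implementation; kPageSize, byte_length and GrowInPlace are illustrative
names): each caller runs a compare-exchange loop on an atomic byte length
and returns the page count it personally swapped out, so two successful
concurrent grows can never report the same old size.

#include <atomic>
#include <cstddef>
#include <optional>

constexpr size_t kPageSize = 0x10000;  // 64 KiB wasm pages (illustrative).
std::atomic<size_t> byte_length{100 * kPageSize};

// Returns the size in pages observed by this particular grow, or nullopt if
// the request does not fit. The value comes from the successful
// compare_exchange, so two racing calls with delta_pages > 0 cannot both
// return the same number.
std::optional<size_t> GrowInPlace(size_t delta_pages, size_t max_pages) {
  size_t old_length = byte_length.load(std::memory_order_relaxed);
  while (true) {
    size_t current_pages = old_length / kPageSize;
    if (delta_pages > max_pages || current_pages > max_pages - delta_pages) {
      return std::nullopt;
    }
    size_t new_length = (current_pages + delta_pages) * kPageSize;
    // The real code makes the added pages read-write here before publishing.
    if (byte_length.compare_exchange_weak(old_length, new_length,
                                          std::memory_order_acq_rel)) {
      return current_pages;  // Unique per successful grow.
    }
    // compare_exchange_weak refreshed old_length on failure; retry.
  }
}

In the example above, both threads enter the loop with an old length of 100
pages; the thread that loses the compare_exchange retries with the refreshed
value and returns 110.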

R=gdeepti@chromium.org
CC=rreverser@google.com

Bug: chromium:1067621
Change-Id: Ib22b5135714a56799e0818ccb39e5dce327e5f8e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2144113
Reviewed-by: Ben Smith <binji@chromium.org>
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67135}
parent da3e611c
......@@ -459,33 +459,63 @@ std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate,
}
// Try to grow the size of a wasm memory in place, without realloc + copy.
bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
size_t max_pages) {
base::Optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
size_t delta_pages,
size_t max_pages) {
// This function grows wasm memory by
// * changing the permissions of additional {delta_pages} pages to kReadWrite;
// * incrementing {byte_length_}.
//
// As this code may be executed concurrently, the following steps are performed:
// 1) Read the current value of {byte_length_};
// 2) Change the permission of all pages from {buffer_start_} to
// {byte_length_} + {delta_pages} * {page_size} to kReadWrite;
// * This operation may be executed racily; the OS takes care of
// synchronization.
// 3) Try to update {byte_length_} with a compare_exchange;
// 4) Repeat 1) to 3) until the compare_exchange in 3) succeeds;
//
// The result of this function is the {byte_length_} before growing in pages.
// The result of this function appears to be the result of an RMW update on
// {byte_length_}, i.e. two concurrent calls to this function will return
// different values if {delta_pages} != 0.
//
// Invariants:
// * Permissions are always set incrementally, i.e. for any page {b} with
// kReadWrite permission, all pages between the first page {a} and page {b}
// also have kReadWrite permission.
// * {byte_length_} is always less than or equal to the amount of memory with
// permissions set to kReadWrite;
// * This is guaranteed by incrementing {byte_length_} with a
// compare_exchange after changing the permissions.
// * This invariant is the reason why we cannot use a fetch_add (see the
// sketch after this function).
DCHECK(is_wasm_memory_);
max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize);
if (delta_pages == 0) return true; // degenerate grow.
if (delta_pages > max_pages) return false; // would never work.
// Do a compare-exchange loop, because we also need to adjust page
// permissions. Note that multiple racing grows both try to set page
// permissions for the entire range (to be RW), so the operating system
// should deal with that raciness. We know we succeeded when we can
// compare/swap the old length with the new length.
size_t old_length = byte_length_.load(std::memory_order_relaxed);
if (delta_pages == 0)
return {old_length / wasm::kWasmPageSize}; // degenerate grow.
if (delta_pages > max_pages) return {}; // would never work.
size_t new_length = 0;
while (true) {
size_t current_pages = old_length / wasm::kWasmPageSize;
// Check if we have exceeded the supplied maximum.
if (current_pages > (max_pages - delta_pages)) return false;
if (current_pages > (max_pages - delta_pages)) return {};
new_length = (current_pages + delta_pages) * wasm::kWasmPageSize;
// Try to adjust the permissions on the memory.
if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
new_length, PageAllocator::kReadWrite)) {
return false;
return {};
}
if (byte_length_.compare_exchange_weak(old_length, new_length,
std::memory_order_acq_rel)) {
......@@ -499,7 +529,7 @@ bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_length - old_length);
}
return true;
return {old_length / wasm::kWasmPageSize};
}
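
For contrast, a hedged sketch (illustrative names, not V8 code) of why a plain
fetch_add cannot preserve the invariant above: each thread bases its permission
change on its own, possibly stale, read of the length, while fetch_add sums all
deltas, so the published length can exceed the region that was actually made
read-write.

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for the real permission change (i::SetPermissions).
void MakePagesReadWrite(size_t up_to_bytes) { (void)up_to_bytes; }

// BROKEN: two threads that both observe length L and grow by dA and dB end up
// with byte_length == L + dA + dB, but read-write permissions set only up to
// max(L + dA, L + dB). A reader of byte_length could then touch pages that
// are not yet accessible.
size_t GrowWithFetchAdd_Broken(std::atomic<size_t>& byte_length,
                               size_t delta_bytes) {
  size_t observed = byte_length.load(std::memory_order_relaxed);
  MakePagesReadWrite(observed + delta_bytes);
  return byte_length.fetch_add(delta_bytes, std::memory_order_acq_rel);
}

The compare_exchange loop above avoids this: a thread publishes a new length
only after its own permission change, and a lost race forces it to re-read the
length and redo the permission change for the larger range.
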
void BackingStore::AttachSharedWasmMemoryObject(
......@@ -512,10 +542,9 @@ void BackingStore::AttachSharedWasmMemoryObject(
}
void BackingStore::BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_pages) {
GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
isolate, backing_store, new_pages);
Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate,
backing_store);
}
void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
......@@ -733,8 +762,7 @@ void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
}
void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_pages) {
Isolate* isolate, std::shared_ptr<BackingStore> backing_store) {
{
// The global lock protects the list of isolates per backing store.
base::MutexGuard scope_lock(&impl()->mutex_);
......
......@@ -9,6 +9,7 @@
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/base/optional.h"
#include "src/handles/handles.h"
namespace v8 {
......@@ -84,8 +85,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
bool free_on_destruct() const { return free_on_destruct_; }
// Attempt to grow this backing store in place.
bool GrowWasmMemoryInPlace(Isolate* isolate, size_t delta_pages,
size_t max_pages);
base::Optional<size_t> GrowWasmMemoryInPlace(Isolate* isolate,
size_t delta_pages,
size_t max_pages);
// Wrapper around ArrayBuffer::Allocator::Reallocate.
bool Reallocate(Isolate* isolate, size_t new_byte_length);
......@@ -104,8 +106,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// after the backing store has been grown. Memory objects in this
// isolate are updated synchronously.
static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
std::shared_ptr<BackingStore>,
size_t new_pages);
std::shared_ptr<BackingStore>);
// TODO(wasm): address space limitations should be enforced in page alloc.
// These methods enforce a limit on the total amount of address space,
......@@ -243,8 +244,7 @@ class GlobalBackingStoreRegistry {
// Broadcast updates to all attached memory objects.
static void BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_pages);
Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
// Update all shared memory objects in the given isolate.
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
......
......@@ -877,18 +877,17 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
if (!backing_store) return -1;
// Compute new size.
size_t new_pages = old_pages + pages;
// Try to handle shared memory first.
if (old_buffer->is_shared()) {
if (FLAG_wasm_grow_shared_memory) {
base::Optional<size_t> result =
backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
// Shared memories can only be grown in place; no copying.
if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store,
new_pages);
if (result.has_value()) {
BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store);
// Broadcasting the update should update this memory object too.
CHECK_NE(*old_buffer, memory_object->array_buffer());
size_t new_pages = result.value() + pages;
// If the allocation succeeded, then this can't possibly overflow:
size_t new_byte_length = new_pages * wasm::kWasmPageSize;
// This is a less than check, as it is not guaranteed that the SAB
......@@ -897,21 +896,29 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// It is also possible that a call to Grow was in progress when
// handling this call.
CHECK_LE(new_byte_length, memory_object->array_buffer().byte_length());
return static_cast<int32_t>(old_pages); // success
// As {old_pages} was read racily, we return the synchronized value
// provided by {GrowWasmMemoryInPlace} here, to provide the atomic
// read-modify-write behavior required by the spec.
return static_cast<int32_t>(result.value()); // success
}
}
return -1;
}
base::Optional<size_t> result =
backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages);
// Try to grow non-shared memory in-place.
if (backing_store->GrowWasmMemoryInPlace(isolate, pages, maximum_pages)) {
if (result.has_value()) {
// Detach old and create a new one with the grown backing store.
old_buffer->Detach(true);
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSArrayBuffer(std::move(backing_store));
memory_object->update_instances(isolate, new_buffer);
return static_cast<int32_t>(old_pages); // success
DCHECK_EQ(result.value(), old_pages);
return static_cast<int32_t>(result.value()); // success
}
size_t new_pages = old_pages + pages;
// Try allocating a new backing store and copying.
std::unique_ptr<BackingStore> new_backing_store =
backing_store->CopyWasmMemory(isolate, new_pages);
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-threads
load('test/mjsunit/wasm/wasm-module-builder.js');
const kNumberOfWorker = 4;
const workerOnMessage = function(msg) {
if (msg.module) {
let module = msg.module;
let mem = msg.mem;
this.instance = new WebAssembly.Instance(module, {m: {memory: mem}});
postMessage({instantiated: true});
} else {
const kNumberOfRuns = 20;
let result = new Array(kNumberOfRuns);
for (let i = 0; i < kNumberOfRuns; ++i) {
result[i] = instance.exports.grow();
}
postMessage({result: result});
}
};
function spawnWorkers() {
let workers = [];
for (let i = 0; i < kNumberOfWorker; i++) {
let worker = new Worker(
'onmessage = ' + workerOnMessage.toString(), {type: 'string'});
workers.push(worker);
}
return workers;
}
function instantiateModuleInWorkers(workers, module, shared_memory) {
for (let worker of workers) {
worker.postMessage({module: module, mem: shared_memory});
let msg = worker.getMessage();
if (!msg.instantiated) throw 'Worker failed to instantiate';
}
}
function triggerWorkers(workers) {
for (let i = 0; i < workers.length; i++) {
let worker = workers[i];
worker.postMessage({});
}
}
(function TestConcurrentGrowMemoryResult() {
let builder = new WasmModuleBuilder();
builder.addImportedMemory('m', 'memory', 1, 500, 'shared');
builder.addFunction('grow', kSig_i_v)
.addBody([kExprI32Const, 1, kExprMemoryGrow, kMemoryZero])
.exportFunc();
const module = builder.toModule();
const shared_memory =
new WebAssembly.Memory({initial: 1, maximum: 500, shared: true});
// Spawn off the workers and run the sequences.
let workers = spawnWorkers();
instantiateModuleInWorkers(workers, module, shared_memory);
triggerWorkers(workers);
let all_results = [];
for (let worker of workers) {
let msg = worker.getMessage();
all_results = all_results.concat(msg.result);
}
all_results.sort((a, b) => a - b);
for (let i = 1; i < all_results.length; ++i) {
assertEquals(all_results[i - 1] + 1, all_results[i]);
}
// Terminate all workers.
for (let worker of workers) {
worker.terminate();
}
})();
......@@ -21,8 +21,10 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace) {
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 2);
EXPECT_TRUE(success);
base::Optional<size_t> result =
backing_store->GrowWasmMemoryInPlace(isolate(), 1, 2);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 1u);
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
}
......@@ -34,8 +36,9 @@ TEST_F(BackingStoreTest, GrowWasmMemoryInPlace_neg) {
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_capacity());
bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 2, 2);
EXPECT_FALSE(success);
base::Optional<size_t> result =
backing_store->GrowWasmMemoryInPlace(isolate(), 2, 2);
EXPECT_FALSE(result.has_value());
EXPECT_EQ(1 * wasm::kWasmPageSize, backing_store->byte_length());
}
......@@ -47,8 +50,10 @@ TEST_F(BackingStoreTest, GrowSharedWasmMemoryInPlace) {
EXPECT_EQ(2 * wasm::kWasmPageSize, backing_store->byte_length());
EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_capacity());
bool success = backing_store->GrowWasmMemoryInPlace(isolate(), 1, 3);
EXPECT_TRUE(success);
base::Optional<size_t> result =
backing_store->GrowWasmMemoryInPlace(isolate(), 1, 3);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), 2u);
EXPECT_EQ(3 * wasm::kWasmPageSize, backing_store->byte_length());
}
......@@ -81,10 +86,11 @@ class GrowerThread : public base::Thread {
while (true) {
size_t current_length = backing_store_->byte_length();
if (current_length >= max_length) break;
bool result =
base::Optional<size_t> result =
backing_store_->GrowWasmMemoryInPlace(isolate_, increment_, max_);
size_t new_length = backing_store_->byte_length();
if (result) {
if (result.has_value()) {
CHECK_LE(current_length / wasm::kWasmPageSize, result.value());
CHECK_GE(new_length, current_length + increment_);
} else {
CHECK_EQ(max_length, new_length);
......