Commit 1e7effd1 authored by Marja Hölttä, committed by V8 LUCI CQ

[rab/gsab] Fix gsab maxByteLength after transferring to worker

Bug: v8:11111
Change-Id: I41a318d3858e48035ae67e937420e2963a13d871
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3035091
Commit-Queue: Marja Hölttä <marja@chromium.org>
Reviewed-by: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75878}
parent 66856bac
...@@ -108,8 +108,8 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target, ...@@ -108,8 +108,8 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
} }
constexpr bool kIsWasmMemory = false; constexpr bool kIsWasmMemory = false;
backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory( backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
isolate, byte_length, page_size, initial_pages, max_pages, isolate, byte_length, max_byte_length, page_size, initial_pages,
kIsWasmMemory, shared); max_pages, kIsWasmMemory, shared);
} }
if (!backing_store) { if (!backing_store) {
// Allocation of backing store failed. // Allocation of backing store failed.
...@@ -475,6 +475,9 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) { ...@@ -475,6 +475,9 @@ BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
// 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(true, array_buffer, kMethodName); CHECK_SHARED(true, array_buffer, kMethodName);
DCHECK_EQ(array_buffer->max_byte_length(),
array_buffer->GetBackingStore()->max_byte_length());
// 4. Let length be ArrayBufferByteLength(O, SeqCst). // 4. Let length be ArrayBufferByteLength(O, SeqCst).
size_t byte_length; size_t byte_length;
if (array_buffer->is_resizable()) { if (array_buffer->is_resizable()) {
......
...@@ -267,6 +267,7 @@ std::unique_ptr<BackingStore> BackingStore::Allocate( ...@@ -267,6 +267,7 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
auto result = new BackingStore(buffer_start, // start auto result = new BackingStore(buffer_start, // start
byte_length, // length byte_length, // length
byte_length, // max length
byte_length, // capacity byte_length, // capacity
shared, // shared shared, // shared
ResizableFlag::kNotResizable, // resizable ResizableFlag::kNotResizable, // resizable
...@@ -305,8 +306,9 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory( ...@@ -305,8 +306,9 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
maximum_pages = std::min(engine_max_pages, maximum_pages); maximum_pages = std::min(engine_max_pages, maximum_pages);
auto result = TryAllocateAndPartiallyCommitMemory( auto result = TryAllocateAndPartiallyCommitMemory(
isolate, initial_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, isolate, initial_pages * wasm::kWasmPageSize,
initial_pages, maximum_pages, true, shared); maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, initial_pages,
maximum_pages, true, shared);
// Shared Wasm memories need an anchor for the memory object list. // Shared Wasm memories need an anchor for the memory object list.
if (result && shared == SharedFlag::kShared) { if (result && shared == SharedFlag::kShared) {
result->type_specific_data_.shared_wasm_memory_data = result->type_specific_data_.shared_wasm_memory_data =
...@@ -336,9 +338,9 @@ void BackingStore::ReleaseReservation(uint64_t num_bytes) { ...@@ -336,9 +338,9 @@ void BackingStore::ReleaseReservation(uint64_t num_bytes) {
} }
std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory( std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t page_size, Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t initial_pages, size_t maximum_pages, bool is_wasm_memory, size_t page_size, size_t initial_pages, size_t maximum_pages,
SharedFlag shared) { bool is_wasm_memory, SharedFlag shared) {
// Enforce engine limitation on the maximum number of pages. // Enforce engine limitation on the maximum number of pages.
if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) { if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
return nullptr; return nullptr;
...@@ -445,16 +447,17 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory( ...@@ -445,16 +447,17 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
ResizableFlag resizable = ResizableFlag resizable =
is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable; is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
auto result = new BackingStore(buffer_start, // start auto result = new BackingStore(buffer_start, // start
byte_length, // length byte_length, // length
byte_capacity, // capacity max_byte_length, // max_byte_length
shared, // shared byte_capacity, // capacity
resizable, // resizable shared, // shared
is_wasm_memory, // is_wasm_memory resizable, // resizable
true, // free_on_destruct is_wasm_memory, // is_wasm_memory
guards, // has_guard_regions true, // free_on_destruct
false, // custom_deleter guards, // has_guard_regions
false); // empty_deleter false, // custom_deleter
false); // empty_deleter
TRACE_BS( TRACE_BS(
"BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n", "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
...@@ -707,6 +710,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation( ...@@ -707,6 +710,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
SharedFlag shared, bool free_on_destruct) { SharedFlag shared, bool free_on_destruct) {
auto result = new BackingStore(allocation_base, // start auto result = new BackingStore(allocation_base, // start
allocation_length, // length allocation_length, // length
allocation_length, // max length
allocation_length, // capacity allocation_length, // capacity
shared, // shared shared, // shared
ResizableFlag::kNotResizable, // resizable ResizableFlag::kNotResizable, // resizable
...@@ -728,6 +732,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation( ...@@ -728,6 +732,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter); bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start auto result = new BackingStore(allocation_base, // start
allocation_length, // length allocation_length, // length
allocation_length, // max length
allocation_length, // capacity allocation_length, // capacity
shared, // shared shared, // shared
ResizableFlag::kNotResizable, // resizable ResizableFlag::kNotResizable, // resizable
...@@ -746,6 +751,7 @@ std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore( ...@@ -746,6 +751,7 @@ std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
SharedFlag shared) { SharedFlag shared) {
auto result = new BackingStore(nullptr, // start auto result = new BackingStore(nullptr, // start
0, // length 0, // length
0, // max length
0, // capacity 0, // capacity
shared, // shared shared, // shared
ResizableFlag::kNotResizable, // resizable ResizableFlag::kNotResizable, // resizable
......
...@@ -61,9 +61,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { ...@@ -61,9 +61,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Tries to allocate `maximum_pages` of memory and commit `initial_pages`. // Tries to allocate `maximum_pages` of memory and commit `initial_pages`.
static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory( static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory(
Isolate* isolate, size_t byte_length, size_t page_size, Isolate* isolate, size_t byte_length, size_t max_byte_length,
size_t initial_pages, size_t maximum_pages, bool is_wasm_memory, size_t page_size, size_t initial_pages, size_t maximum_pages,
SharedFlag shared); bool is_wasm_memory, SharedFlag shared);
// Create a backing store that wraps existing allocated memory. // Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the // If {free_on_destruct} is {true}, the memory will be freed using the
...@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { ...@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
std::memory_order memory_order = std::memory_order_relaxed) const { std::memory_order memory_order = std::memory_order_relaxed) const {
return byte_length_.load(memory_order); return byte_length_.load(memory_order);
} }
size_t max_byte_length() const { return max_byte_length_; }
size_t byte_capacity() const { return byte_capacity_; } size_t byte_capacity() const { return byte_capacity_; }
bool is_shared() const { return is_shared_; } bool is_shared() const { return is_shared_; }
bool is_resizable() const { return is_resizable_; } bool is_resizable() const { return is_resizable_; }
...@@ -165,12 +166,13 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { ...@@ -165,12 +166,13 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
private: private:
friend class GlobalBackingStoreRegistry; friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity, BackingStore(void* buffer_start, size_t byte_length, size_t max_byte_length,
SharedFlag shared, ResizableFlag resizable, bool is_wasm_memory, size_t byte_capacity, SharedFlag shared, ResizableFlag resizable,
bool free_on_destruct, bool has_guard_regions, bool is_wasm_memory, bool free_on_destruct,
bool custom_deleter, bool empty_deleter) bool has_guard_regions, bool custom_deleter, bool empty_deleter)
: buffer_start_(buffer_start), : buffer_start_(buffer_start),
byte_length_(byte_length), byte_length_(byte_length),
max_byte_length_(max_byte_length),
byte_capacity_(byte_capacity), byte_capacity_(byte_capacity),
is_shared_(shared == SharedFlag::kShared), is_shared_(shared == SharedFlag::kShared),
is_resizable_(resizable == ResizableFlag::kResizable), is_resizable_(resizable == ResizableFlag::kResizable),
...@@ -185,6 +187,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { ...@@ -185,6 +187,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_); DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
DCHECK_IMPLIES(is_resizable_, !custom_deleter_); DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
DCHECK_IMPLIES(is_resizable_, free_on_destruct_); DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_,
byte_length_ == max_byte_length_);
} }
BackingStore(const BackingStore&) = delete; BackingStore(const BackingStore&) = delete;
BackingStore& operator=(const BackingStore&) = delete; BackingStore& operator=(const BackingStore&) = delete;
...@@ -192,6 +196,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase { ...@@ -192,6 +196,9 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
void* buffer_start_ = nullptr; void* buffer_start_ = nullptr;
std::atomic<size_t> byte_length_{0}; std::atomic<size_t> byte_length_{0};
// Max byte length of the corresponding JSArrayBuffer(s).
size_t max_byte_length_ = 0;
// Amount of the memory allocated
size_t byte_capacity_ = 0; size_t byte_capacity_ = 0;
struct DeleterInfo { struct DeleterInfo {
......
...@@ -59,6 +59,7 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable, ...@@ -59,6 +59,7 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
if (!backing_store) { if (!backing_store) {
set_backing_store(GetIsolate(), nullptr); set_backing_store(GetIsolate(), nullptr);
set_byte_length(0); set_byte_length(0);
set_max_byte_length(0);
} else { } else {
Attach(std::move(backing_store)); Attach(std::move(backing_store));
} }
...@@ -72,6 +73,9 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) { ...@@ -72,6 +73,9 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
DCHECK_NOT_NULL(backing_store); DCHECK_NOT_NULL(backing_store);
DCHECK_EQ(is_shared(), backing_store->is_shared()); DCHECK_EQ(is_shared(), backing_store->is_shared());
DCHECK_EQ(is_resizable(), backing_store->is_resizable()); DCHECK_EQ(is_resizable(), backing_store->is_resizable());
DCHECK_IMPLIES(
!backing_store->is_wasm_memory() && !backing_store->is_resizable(),
backing_store->byte_length() == backing_store->max_byte_length());
DCHECK(!was_detached()); DCHECK(!was_detached());
Isolate* isolate = GetIsolate(); Isolate* isolate = GetIsolate();
set_backing_store(isolate, backing_store->buffer_start()); set_backing_store(isolate, backing_store->buffer_start());
...@@ -82,6 +86,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) { ...@@ -82,6 +86,7 @@ void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
} else { } else {
set_byte_length(backing_store->byte_length()); set_byte_length(backing_store->byte_length());
} }
set_max_byte_length(backing_store->max_byte_length());
if (backing_store->is_wasm_memory()) set_is_detachable(false); if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true); if (!backing_store->free_on_destruct()) set_is_external(true);
Heap* heap = isolate->heap(); Heap* heap = isolate->heap();
......
...@@ -534,6 +534,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable], ...@@ -534,6 +534,7 @@ const ctors = [[ArrayBuffer, (b) => b.resizable],
assert(!(gsab instanceof ArrayBuffer)); assert(!(gsab instanceof ArrayBuffer));
assert(gsab instanceof SharedArrayBuffer); assert(gsab instanceof SharedArrayBuffer);
assert(10 == gsab.byteLength); assert(10 == gsab.byteLength);
assert(20 == gsab.maxByteLength);
gsab.grow(15); gsab.grow(15);
postMessage('ok'); postMessage('ok');
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment